commit 8e2b437a8b
Author: Lana Steuck
Date:   2011-02-03 19:15:30 -08:00

585 changed files with 14055 additions and 21016 deletions

View File

@@ -100,3 +100,5 @@ a4e6aa1f45ad23a6f083ed98d970b5006ea4d292 jdk7-b116
 142129d8599d1f56b29387e7f9a5fad53b6d61df jdk7-b123
 aa894c225b1a517b665ac2a58295217ea2245134 jdk7-b124
 f658ec2730fa29323c36d23c27e54c7219ef5e16 jdk7-b125
+f1df068076986679ea1105532a65529d63a89060 jdk7-b126
+f83cd8bd35c678f94e526990e03dc838d0ec2717 jdk7-b127

View File

@@ -100,3 +100,5 @@ f1591eed71f64f6eba79fb7426f5616cc4dfea73 jdk7-b122
 ed6950da30cf1e8904b4bdb034d471647942271f jdk7-b123
 024a6755895bf91b5a3c98984c89ee018efbf538 jdk7-b124
 5c4df7e992775c102f08e9f1c0a124b324641b70 jdk7-b125
+b566d490905691787f8931f69947a92c67c6d5e4 jdk7-b126
+bd70f76b0309068f157ae759c36eac8f2c6d098e jdk7-b127

View File

@@ -29,15 +29,6 @@ ifndef TOPDIR
 TOPDIR:=.
 endif
-# Openjdk sources (only used if SKIP_OPENJDK_BUILD!=true)
-OPENJDK_SOURCETREE=$(TOPDIR)/openjdk
-OPENJDK_BUILDDIR:=$(shell \
-  if [ -r $(OPENJDK_SOURCETREE)/Makefile ]; then \
-    echo "$(OPENJDK_SOURCETREE)"; \
-  else \
-    echo "."; \
-  fi)
 ifndef JDK_TOPDIR
   JDK_TOPDIR=$(TOPDIR)/jdk
 endif
@@ -70,7 +61,7 @@ include ./make/deploy-rules.gmk
 all::
 	@$(START_ECHO)
-all:: openjdk_check sanity
+all:: sanity
 ifeq ($(SKIP_FASTDEBUG_BUILD), false)
   all:: fastdebug_build
@@ -80,10 +71,6 @@ ifeq ($(SKIP_DEBUG_BUILD), false)
   all:: debug_build
 endif
-ifneq ($(SKIP_OPENJDK_BUILD), true)
-  all:: openjdk_build
-endif
 all:: all_product_build
 all::
@@ -267,81 +254,6 @@ product_build:: build_product_image
 debug_build:: build_debug_image
 fastdebug_build:: build_fastdebug_image
-# Check on whether we really can build the openjdk, need source etc.
-openjdk_check: FRC
-ifneq ($(SKIP_OPENJDK_BUILD), true)
-	@$(ECHO) " "
-	@$(ECHO) "================================================="
-	@if [ ! -r $(OPENJDK_BUILDDIR)/Makefile ] ; then \
-	    $(ECHO) "ERROR: No openjdk source tree available at: $(OPENJDK_BUILDDIR)"; \
-	    exit 1; \
-	  else \
-	    $(ECHO) "OpenJDK will be built after JDK is built"; \
-	    $(ECHO) "  OPENJDK_BUILDDIR=$(OPENJDK_BUILDDIR)"; \
-	  fi
-	@$(ECHO) "================================================="
-	@$(ECHO) " "
-endif
-# If we have bundle rules, we have a chance here to do a complete cycle
-# build, of production and open build.
-# FIXUP: We should create the openjdk source bundle and build that?
-#   But how do we reliable create or get at a formal openjdk source tree?
-#   The one we have needs to be trimmed of built bits and closed dirs.
-#   The repositories might not be available.
-#   The openjdk source bundle is probably not available.
-ifneq ($(SKIP_OPENJDK_BUILD), true)
-  ifeq ($(BUILD_JDK), true)
-    ifeq ($(BUNDLE_RULES_AVAILABLE), true)
-      OPENJDK_OUTPUTDIR=$(ABS_OUTPUTDIR)/open-output
-      OPENJDK_BUILD_NAME \
-        = openjdk-$(JDK_MINOR_VERSION)-$(BUILD_NUMBER)-$(PLATFORM)-$(ARCH)-$(BUNDLE_DATE)
-      OPENJDK_BUILD_BINARY_ZIP=$(ABS_BIN_BUNDLEDIR)/$(OPENJDK_BUILD_NAME).zip
-      BUILT_IMAGE=$(ABS_OUTPUTDIR)/j2sdk-image
-      ifeq ($(PLATFORM)$(ARCH_DATA_MODEL),solaris64)
-        OPENJDK_BOOTDIR=$(BOOTDIR)
-        OPENJDK_IMPORTJDK=$(JDK_IMPORT_PATH)
-      else
-        OPENJDK_BOOTDIR=$(BUILT_IMAGE)
-        OPENJDK_IMPORTJDK=$(BUILT_IMAGE)
-      endif
-openjdk_build:
-	@$(START_ECHO)
-	@$(ECHO) " "
-	@$(ECHO) "================================================="
-	@$(ECHO) "Starting openjdk build"
-	@$(ECHO) " Using: ALT_JDK_DEVTOOLS_DIR=$(JDK_DEVTOOLS_DIR)"
-	@$(ECHO) "================================================="
-	@$(ECHO) " "
-	$(RM) -r $(OPENJDK_OUTPUTDIR)
-	$(MKDIR) -p $(OPENJDK_OUTPUTDIR)
-	($(CD) $(OPENJDK_BUILDDIR) && $(MAKE) \
-	  OPENJDK=true \
-	  GENERATE_DOCS=false \
-	  ALT_JDK_DEVTOOLS_DIR=$(JDK_DEVTOOLS_DIR) \
-	  ALT_OUTPUTDIR=$(OPENJDK_OUTPUTDIR) \
-	  ALT_BOOTDIR=$(OPENJDK_BOOTDIR) \
-	  ALT_JDK_IMPORT_PATH=$(OPENJDK_IMPORTJDK) \
-	  product_build )
-	$(RM) $(OPENJDK_BUILD_BINARY_ZIP)
-	( $(CD) $(OPENJDK_OUTPUTDIR)/j2sdk-image && \
-	  $(ZIPEXE) -q -r $(OPENJDK_BUILD_BINARY_ZIP) .)
-	$(RM) -r $(OPENJDK_OUTPUTDIR)
-	@$(ECHO) " "
-	@$(ECHO) "================================================="
-	@$(ECHO) "Finished openjdk build"
-	@$(ECHO) " Binary Bundle: $(OPENJDK_BUILD_BINARY_ZIP)"
-	@$(ECHO) "================================================="
-	@$(ECHO) " "
-	@$(FINISH_ECHO)
-    endif
-  endif
-endif
 clobber::
 	$(RM) -r $(OUTPUTDIR)/*
 	$(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-debug/*
@@ -448,7 +360,6 @@ CACERTS_FILE.desc = Location of certificates file
 DEVTOOLS_PATH.desc = Directory containing zip and gnumake
 CUPS_HEADERS_PATH.desc = Include directory location for CUPS header files
 DXSDK_PATH.desc = Root directory of DirectX SDK
-MSVCRT_DLL_PATH.desc = Directory containing mscvrt.dll
 # Make variables to print out (description and value)
 VARIABLE_PRINTVAL_LIST += \
@@ -477,12 +388,10 @@ VARIABLE_CHECKFIL_LIST += \
 ifeq ($(PLATFORM), windows)
   VARIABLE_PRINTVAL_LIST += \
-    DXSDK_PATH \
-    MSVCRT_DLL_PATH
+    DXSDK_PATH
   VARIABLE_CHECKDIR_LIST += \
-    DXSDK_PATH \
-    MSVCRT_DLL_PATH
+    DXSDK_PATH
 endif

View File

@@ -1414,14 +1414,14 @@
 but it's normally found via the DirectX environment variable
 <tt>DXSDK_DIR</tt>.
 </blockquote>
-<strong><a name="msvcrt"><tt>MSVCR100.DLL</tt></a></strong>
+<strong><a name="msvcrNN"><tt>MSVCR100.DLL</tt></a></strong>
 <blockquote>
 The OpenJDK build requires access to a redistributable
 <tt>MSVCR100.DLL</tt>.
 This is usually picked up automatically from the redist
 directories of Visual Studio 2010.
 If this cannot be found set the
-<a href="#ALT_MSVCRT_DLL_PATH"><tt>ALT_MSVCRT_DLL_PATH</tt></a>
+<a href="#ALT_MSVCRNN_DLL_PATH"><tt>ALT_MSVCRNN_DLL_PATH</tt></a>
 variable to the location of this file.
 <p>
 </blockquote>
@@ -1671,15 +1671,10 @@
 variable <tt>DXSDK_DIR</tt>,
 failing that, look in <tt>C:/DXSDK</tt>.
 </dd>
-<dt><tt><a name="ALT_MSVCRT_DLL_PATH">ALT_MSVCRT_DLL_PATH</a></tt> </dt>
-<dd>
-The location of the
-<a href="#msvcrt"><tt>MSVCRT.DLL</tt></a>.
-</dd>
 <dt><tt><a name="ALT_MSVCRNN_DLL_PATH">ALT_MSVCRNN_DLL_PATH</a></tt> </dt>
 <dd>
 The location of the
-<a href="#msvcrt"><tt>MSVCR100.DLL</tt></a>.
+<a href="#msvcrNN"><tt>MSVCR100.DLL</tt></a>.
 </dd>
 </dl>
 </dd>

View File

@@ -100,3 +100,5 @@ cff5a173ec1e89013359e804a3e31736ef6fb462 jdk7-b120
 a230c142628cea22475ab9dc5cd544266ddf2466 jdk7-b123
 f90b3e014e831eb4f32ef035a1dad2b8ba87949f jdk7-b124
 1ce58c72b7892cb813eb920276c7e7f17a1b79fe jdk7-b125
+d7532bcd3742f1576dd07ff9fbb535c9c9a276e9 jdk7-b126
+64775e83f4df894355f45555f50c410de6727b4e jdk7-b127

View File

@@ -141,3 +141,6 @@ f5603a6e50422046ebc0d2f1671d55cb8f1bf1e9 jdk7-b120
 0a8e0d4345b37b71ec49dda08ee03b68c4f1b592 jdk7-b124
 0a8e0d4345b37b71ec49dda08ee03b68c4f1b592 hs20-b05
 e24ab3fa6aafad3efabbe7dba9918c5f461a20b1 jdk7-b125
+4c851c931d001a882cab809aaf3a55371b919244 jdk7-b126
+e24ab3fa6aafad3efabbe7dba9918c5f461a20b1 hs20-b06
+102466e70debc4b907afbd7624e34ddb1aafee9f jdk7-b127

View File

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
 HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=06
+HS_BUILD_NUMBER=07
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7

View File

@@ -4104,7 +4104,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
   sub(top, t1, t1); // size of tlab's allocated portion
-  incr_allocated_bytes(t1, 0, t2);
+  incr_allocated_bytes(t1, t2, t3);
   // refill the tlab with an eden allocation
   bind(do_refill);
@@ -4138,19 +4138,14 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
   delayed()->nop();
 }
-void MacroAssembler::incr_allocated_bytes(Register var_size_in_bytes,
-                                          int con_size_in_bytes,
-                                          Register t1) {
+void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                                          Register t1, Register t2) {
   // Bump total bytes allocated by this thread
   assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
-  assert_different_registers(var_size_in_bytes, t1);
+  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
   // v8 support has gone the way of the dodo
   ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
-  if (var_size_in_bytes->is_valid()) {
-    add(t1, var_size_in_bytes, t1);
-  } else {
-    add(t1, con_size_in_bytes, t1);
-  }
+  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
   stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
 }
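For orientation, here is a small standalone sketch of the idea behind the new signature. It is not HotSpot's actual RegisterOrConstant or ensure_simm13_or_reg code; the struct and helper names below are hypothetical. The point is that the size argument is now either a register or a constant, and a constant that does not fit SPARC's 13-bit signed immediate field has to be materialized into a temporary register before the add.

```cpp
#include <cstdio>

// Hypothetical stand-in for HotSpot's Register/RegisterOrConstant pair:
// the operand is either a register or a constant.
struct RegOrConst {
  int  reg;   // register number, or -1 when the operand is a constant
  long con;   // the constant, used only when reg < 0
  bool is_register() const { return reg >= 0; }
};

// SPARC arithmetic immediates are 13-bit signed values.
static bool fits_simm13(long x) { return -4096 <= x && x <= 4095; }

// Decide how the "size" operand of incr_allocated_bytes would be encoded:
// a register is used directly, a small constant becomes an immediate, and a
// large constant must first be loaded into a temporary register (which is
// why the new signature carries a second temp, t2).
const char* classify_operand(const RegOrConst& v) {
  if (v.is_register())    return "register operand";
  if (fits_simm13(v.con)) return "13-bit immediate";
  return "constant materialized into a temp register";
}

int main() {
  RegOrConst in_reg  = {3, 0};        // variable size already in a register
  RegOrConst small   = {-1, 48};      // small fixed object size
  RegOrConst too_big = {-1, 1 << 20}; // would not fit the simm13 field
  std::printf("%s\n%s\n%s\n", classify_operand(in_reg),
              classify_operand(small), classify_operand(too_big));
  return 0;
}
```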

View File

@@ -823,15 +823,23 @@ class Assembler : public AbstractAssembler {
   };
   // test if x is within signed immediate range for nbits
-  static bool is_simm(int x, int nbits) { return -( 1 << nbits-1 ) <= x && x < ( 1 << nbits-1 ); }
+  static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 ) <= x && x < ( intptr_t(1) << nbits-1 ); }
   // test if -4096 <= x <= 4095
-  static bool is_simm13(int x) { return is_simm(x, 13); }
+  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
+  static bool is_in_wdisp_range(address a, address b, int nbits) {
+    intptr_t d = intptr_t(b) - intptr_t(a);
+    return is_simm(d, nbits + 2);
+  }
   // test if label is in simm16 range in words (wdisp16).
   bool is_in_wdisp16_range(Label& L) {
-    intptr_t d = intptr_t(pc()) - intptr_t(target(L));
-    return is_simm(d, 18);
+    return is_in_wdisp_range(target(L), pc(), 16);
+  }
+  // test if the distance between two addresses fits in simm30 range in words
+  static bool is_in_wdisp30_range(address a, address b) {
+    return is_in_wdisp_range(a, b, 30);
   }
   enum ASIs { // page 72, v9
@@ -1843,6 +1851,8 @@ class MacroAssembler: public Assembler {
   inline void jmp( Register s1, Register s2 );
   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
+  // Check if the call target is out of wdisp30 range (relative to the code cache)
+  static inline bool is_far_target(address d);
   inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void callr( Register s1, Register s2 );
@@ -2389,7 +2399,8 @@ public:
     Label&   slow_case // continuation point if fast allocation fails
   );
   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
-  void incr_allocated_bytes(Register var_size_in_bytes, int con_size_in_bytes, Register t1);
+  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                            Register t1, Register t2);
   // interface method calling
   void lookup_interface_method(Register recv_klass,
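A side note on the new range helpers above: SPARC branch and call instructions encode a word displacement, so a field of nbits words covers nbits + 2 bits of byte distance, which is exactly what the `is_simm(d, nbits + 2)` test expresses. A minimal standalone sketch of that arithmetic, using free functions with hypothetical names rather than the Assembler members themselves:

```cpp
#include <cstdint>
#include <cstdio>

// Signed-immediate range test, as in the patched is_simm(): use intptr_t so
// full pointer-sized differences can be tested, not just 32-bit ints.
static bool is_simm(intptr_t x, int nbits) {
  return -(intptr_t(1) << (nbits - 1)) <= x && x < (intptr_t(1) << (nbits - 1));
}

// A displacement field of `nbits` words covers nbits + 2 bits of byte offset,
// because SPARC instructions are 4 bytes long and displacements count words.
static bool is_in_wdisp_range(intptr_t from, intptr_t to, int nbits) {
  return is_simm(to - from, nbits + 2);
}

int main() {
  intptr_t pc = 0x100000;
  // wdisp30 (the plain call instruction) reaches roughly +/- 2 GB.
  std::printf("1MB away, wdisp30: %d\n",
              (int)is_in_wdisp_range(pc, pc + (1 << 20), 30));
  // wdisp16 only reaches about +/- 128 KB, so 1 MB is out of range.
  std::printf("1MB away, wdisp16: %d\n",
              (int)is_in_wdisp_range(pc, pc + (1 << 20), 16));
  return 0;
}
```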

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -588,10 +588,13 @@ inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L
 inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
 inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
+inline bool MacroAssembler::is_far_target(address d) {
+  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
+}
 // Call with a check to see if we need to deal with the added
 // expense of relocation and if we overflow the displacement
-// of the quick call instruction./
-// Check to see if we have to deal with relocations
+// of the quick call instruction.
 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
 #ifdef _LP64
   intptr_t disp;
@@ -603,14 +606,12 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
   // Is this address within range of the call instruction?
   // If not, use the expensive instruction sequence
-  disp = (intptr_t)d - (intptr_t)pc();
-  if ( disp != (intptr_t)(int32_t)disp ) {
+  if (is_far_target(d)) {
     relocate(rt);
     AddressLiteral dest(d);
     jumpl_to(dest, O7, O7);
-  }
-  else {
-    Assembler::call( d, rt );
+  } else {
+    Assembler::call(d, rt);
   }
 #else
   Assembler::call( d, rt );
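The is_far_target() check above is deliberately conservative: a call may be emitted anywhere in the code cache, so the target is only guaranteed reachable with a single call instruction if it is within wdisp30 range of both ends of the cache. A small standalone illustration of that reasoning, using plain integers instead of HotSpot's CodeCache API:

```cpp
#include <cstdint>
#include <cstdio>

// +/- 2 GB: the byte range a SPARC call (30-bit word displacement) can span.
static const int64_t kWdisp30Bytes = int64_t(1) << 31;

static bool in_wdisp30_range(int64_t target, int64_t site) {
  int64_t d = target - site;
  return -kWdisp30Bytes <= d && d < kWdisp30Bytes;
}

// "Far" means: some call site inside [lo, hi) might not reach the target with
// a single call instruction, so the longer sethi/jmpl sequence must be used.
// Checking both bounds covers every possible call site in between.
static bool is_far_target(int64_t target, int64_t lo, int64_t hi) {
  return !in_wdisp30_range(target, lo) || !in_wdisp30_range(target, hi);
}

int main() {
  int64_t lo = 0x100000000LL;                     // hypothetical cache base
  int64_t hi = lo + (int64_t(128) << 20);         // hypothetical 128 MB cache
  std::printf("stub just past the cache: far=%d\n",
              (int)is_far_target(hi + 4096, lo, hi));
  std::printf("entry point 16 GB away:   far=%d\n",
              (int)is_far_target(lo + (int64_t(16) << 30), lo, hi));
  return 0;
}
```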

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2358,6 +2358,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
          op->tmp3()->as_register()  == G4 &&
          op->tmp4()->as_register()  == O1 &&
          op->klass()->as_register() == G5, "must be");
+  LP64_ONLY( __ signx(op->len()->as_register()); )
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {

View File

@@ -170,11 +170,13 @@ void C1_MacroAssembler::try_allocate(
   Register t2,       // temp register
   Label&   slow_case // continuation point if fast allocation fails
 ) {
+  RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
+    ? RegisterOrConstant(var_size_in_bytes) : RegisterOrConstant(con_size_in_bytes);
   if (UseTLAB) {
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
   } else {
     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
-    incr_allocated_bytes(var_size_in_bytes, con_size_in_bytes, t1);
+    incr_allocated_bytes(size_in_bytes, t1, t2);
   }
 }

View File

@@ -343,9 +343,10 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
     // returned.
     restore_live_registers(sasm);
-    __ restore();
-    __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
-    __ delayed()->nop();
+    AddressLiteral dest(deopt_blob->unpack_with_reexecution());
+    __ jump_to(dest, O0);
+    __ delayed()->restore();
     __ bind(no_deopt);
     restore_live_registers(sasm);
@@ -461,7 +462,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         // get the instance size
         __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
         __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
-        __ incr_allocated_bytes(G1_obj_size, 0, G3_t1);
+        __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
         __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
         __ verify_oop(O0_obj);
@@ -577,7 +578,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
         __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
-        __ incr_allocated_bytes(G1_arr_size, 0, G3_t1);
+        __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
         __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
         __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1295,16 +1295,13 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
 // Get the method data pointer from the methodOop and set the
 // specified register to its value.
-void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) {
+void InterpreterMacroAssembler::set_method_data_pointer() {
   assert(ProfileInterpreter, "must be profiling interpreter");
   Label get_continue;
   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(get_continue);
   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
-  if (Roff != noreg)
-    // Roff contains a method data index ("mdi").  It defaults to zero.
-    add(ImethodDataPtr, Roff, ImethodDataPtr);
   bind(get_continue);
 }
@@ -1315,10 +1312,11 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   Label zero_continue;
   // Test MDO to avoid the call if it is NULL.
-  ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
+  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(zero_continue);
   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
-  set_method_data_pointer_offset(O0);
+  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+  add(ImethodDataPtr, O0, ImethodDataPtr);
   bind(zero_continue);
 }
@@ -1369,7 +1367,6 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
 }
 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
-                                                                Register cur_bcp,
                                                                 Register Rtmp,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1400,8 +1397,8 @@ void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocat
   delayed()->nop();
   // Build it now.
-  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp);
-  set_method_data_pointer_offset(O0);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+  set_method_data_pointer_for_bcp();
   ba(false, profile_continue);
   delayed()->nop();
   bind(done);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -269,12 +269,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
 #ifndef CC_INTERP
   // Interpreter profiling operations
-  void set_method_data_pointer() { set_method_data_pointer_offset(noreg); }
+  void set_method_data_pointer();
   void set_method_data_pointer_for_bcp();
-  void set_method_data_pointer_offset(Register mdi_reg);
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register cur_bcp, Register Rtmp, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
   void set_mdp_data_at(int constant, Register value);
   void increment_mdp_data_at(Address counter, Register bumped_count,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -395,7 +395,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -447,8 +447,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CATCH);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
       __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

View File

@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -575,7 +575,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
 int MachCallRuntimeNode::ret_addr_offset() {
 #ifdef _LP64
-  return NativeFarCall::instruction_size;  // farcall; delay slot
+  if (MacroAssembler::is_far_target(entry_point())) {
+    return NativeFarCall::instruction_size;
+  } else {
+    return NativeCall::instruction_size;
+  }
 #else
   return NativeCall::instruction_size;  // call; delay slot
 #endif
@@ -941,7 +945,7 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
 #endif
 }
-void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
+void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
   // it creates a PcDesc structure which tracks the offset of a call
@@ -963,20 +967,7 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocTyp
   int startpos = __ offset();
 #endif /* ASSERT */
-#ifdef _LP64
-  // Calls to the runtime or native may not be reachable from compiled code,
-  // so we generate the far call sequence on 64 bit sparc.
-  // This code sequence is relocatable to any address, even on LP64.
-  if ( force_far_call ) {
-    __ relocate(rtype);
-    AddressLiteral dest(entry_point);
-    __ jumpl_to(dest, O7, O7);
-  }
-  else
-#endif
-  {
-    __ call((address)entry_point, rtype);
-  }
+  __ call((address)entry_point, rtype);
   if (preserve_g2)   __ delayed()->mov(G2, L7);
   else  __ delayed()->nop();
@@ -2507,7 +2498,7 @@ encode %{
     // CALL directly to the runtime
     // The user of this is responsible for ensuring that R_L7 is empty (killed).
     emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
-                    /*preserve_g2=*/true, /*force far call*/true);
+                    /*preserve_g2=*/true);
 %}
   enc_class preserve_SP %{

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1364,15 +1364,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
     // We have decided to profile this method in the interpreter
     __ bind(profile_method);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);
-#ifdef ASSERT
-    __ tst(O0);
-    __ breakpoint_trap(Assembler::notEqual);
-#endif
-    __ set_method_data_pointer();
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+    __ set_method_data_pointer_for_bcp();
     __ ba(false, profile_method_continue);
     __ delayed()->nop();
   }

View File

@@ -1689,7 +1689,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     const Register G4_invoke_ctr = G4;
     __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
     if (ProfileInterpreter) {
-      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+      __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
       if (UseOnStackReplacement) {
         __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
       }
@@ -3447,7 +3447,8 @@ void TemplateTable::_new() {
     __ delayed()->nop();
     // bump total bytes allocated by this thread
-    __ incr_allocated_bytes(Roffset, 0, G1_scratch);
+    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
+    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
   }
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1641,12 +1641,14 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
 }
 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
+  Register len = op->len()->as_register();
+  LP64_ONLY( __ movslq(len, len); )
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
     __ jmp(*op->stub()->entry());
   } else {
-    Register len =  op->len()->as_register();
     Register tmp1 = op->tmp1()->as_register();
     Register tmp2 = op->tmp2()->as_register();
     Register tmp3 = op->tmp3()->as_register();

View File

@@ -62,7 +62,7 @@ define_pd_global(intx, StackRedPages, 1);
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
+define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
 #endif // AMD64
 define_pd_global(intx, PreInflateSpin, 10);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -819,7 +819,7 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& ze
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
@@ -827,21 +827,17 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
+  jcc(Assembler::zero, set_mdp);
   // rbx,: method
   // rsi: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
   // rax,: mdi
+  // mdo is guaranteed to be non-zero here, we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -855,7 +855,7 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
@@ -863,21 +863,17 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
+  jcc(Assembler::zero, set_mdp);
   // rbx: method
   // r13: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
   // rax: mdi
+  // mdo is guaranteed to be non-zero here, we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }
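Both the 32-bit and 64-bit rewrites above compute the same thing. Stated outside of assembly-emission code, the logic is roughly the following sketch (hypothetical names, not HotSpot's actual types): if the method has no methodDataOop yet, the frame's mdx slot is simply cleared; otherwise it is set to a pointer into the MDO's data section, offset by the index that InterpreterRuntime::bcp_to_di() returns for the current bcp.

```cpp
#include <cstdint>
#include <cstdio>

// mdp = MDO + data_offset + mdi, or 0 when there is no MDO yet.
uintptr_t method_data_pointer_for_bcp(uintptr_t mdo,         // methodDataOop address, 0 if none
                                      size_t    data_offset, // start of the MDO data section
                                      size_t    mdi) {       // index computed by bcp_to_di
  if (mdo == 0) {
    return 0;                      // branch to set_mdp taken before the call; slot is cleared
  }
  return mdo + data_offset + mdi;  // point into the MDO's data section
}

int main() {
  // Purely illustrative addresses and offsets.
  std::printf("%#lx\n", (unsigned long)method_data_pointer_for_bcp(0x1000, 64, 24));
  std::printf("%#lx\n", (unsigned long)method_data_pointer_for_bcp(0, 64, 24));
  return 0;
}
```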

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -390,7 +390,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -451,8 +451,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CHECK);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
       const Register rdi_pc = rax;
       __ pop(rdi_pc);  // caller PC

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1367,15 +1367,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (ProfileInterpreter) {
     // We have decided to profile this method in the interpreter
     __ bind(profile_method);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
-    __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
-    __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
-    __ test_method_data_pointer(rax, profile_method_continue);
-    __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+    __ set_method_data_pointer_for_bcp();
+    __ get_method(rbx);
     __ jmp(profile_method_continue);
   }
   // Handle overflow of counter and compile method

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1383,20 +1383,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (ProfileInterpreter) {
     // We have decided to profile this method in the interpreter
     __ bind(profile_method);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
-               r13, true);
-    __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
-    __ movptr(rax, Address(rbx,
-                           in_bytes(methodOopDesc::method_data_offset())));
-    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-              rax);
-    __ test_method_data_pointer(rax, profile_method_continue);
-    __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-    __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-              rax);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+    __ set_method_data_pointer_for_bcp();
+    __ get_method(rbx);
     __ jmp(profile_method_continue);
   }
   // Handle overflow of counter and compile method

View File

@@ -1665,16 +1665,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }

View File

@@ -1695,21 +1695,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg,
-                 CAST_FROM_FN_PTR(address,
-                                  InterpreterRuntime::profile_method), r13);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx,
-                             in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }

View File

@@ -1610,10 +1610,9 @@ int os::current_process_id() {
 const char* os::dll_file_extension() { return ".so"; }
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 static bool file_exists(const char* filename) {
   struct stat statbuf;

View File

@@ -1884,10 +1884,9 @@ void os::set_error_file(const char *logfile) {}
 const char* os::dll_file_extension() { return ".so"; }
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 static bool file_exists(const char* filename) {
   struct stat statbuf;

View File

@@ -1044,9 +1044,9 @@ os::closedir(DIR *dirp)
   return 0;
 }
+// This must be hard coded because it's the system's temporary
+// directory not the java application's temp directory, ala java.io.tmpdir.
 const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  if (prop != 0) return prop;
   static char path_buf[MAX_PATH];
   if (GetTempPath(MAX_PATH, path_buf)>0)
     return path_buf;
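The three os::get_temp_directory() changes above share one intent: the VM's own notion of a temp directory now always means the system temporary directory and no longer follows -Djava.io.tmpdir. A rough standalone illustration of the resulting behaviour, not the HotSpot code itself:

```cpp
#include <cstdio>
#ifdef _WIN32
#include <windows.h>
#endif

// System temp directory, deliberately ignoring any java.io.tmpdir override:
// hard-coded /tmp on POSIX systems, GetTempPath() on Windows.
const char* vm_temp_directory() {
#ifdef _WIN32
  static char buf[MAX_PATH];
  return GetTempPathA(MAX_PATH, buf) > 0 ? buf : nullptr;
#else
  return "/tmp";
#endif
}

int main() {
  std::printf("VM temp directory: %s\n", vm_temp_directory());
  return 0;
}
```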

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,8 @@ inline void Atomic::dec    (volatile jint*     dest) { (void)add   (-1, dest);
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
 inline jint Atomic::add (jint add_value, volatile jint* dest) {
   intptr_t rv;
   __asm__ volatile(

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,6 @@ inline jint Atomic::cmpxchg (jint exchange_value, volatile jint*
   return exchange_value;
 }
-extern "C" {
-  // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-}
 #ifdef AMD64
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
@@ -164,9 +159,9 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
-#else
-//inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
-//inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
@@ -189,6 +184,12 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 }
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
   return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
 }
@@ -200,6 +201,21 @@ inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*
 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
 #endif // AMD64
 #endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
         .globl _mmx_Copy_arrayof_conjoint_jshorts
         .globl _Atomic_cmpxchg_long
+        .globl _Atomic_move_long
         .text
@@ -653,3 +654,15 @@ _Atomic_cmpxchg_long:
         popl     %ebx
         ret
+
+        # Support for jlong Atomic::load and Atomic::store.
+        # void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+        .p2align 4,,15
+        .type    _Atomic_move_long,@function
+_Atomic_move_long:
+        movl     4(%esp), %eax   # src
+        fildll   (%eax)
+        movl     8(%esp), %eax   # dest
+        fistpll  (%eax)
+        ret
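The new assembly routine relies on the fact that an x87 fildll/fistpll pair reads and writes 8 bytes with a single memory access each, which is what makes a 64-bit load or store atomic on 32-bit x86 even without cmpxchg8b. A minimal GCC-style inline-asm sketch of the same idea, assuming a 32-bit x86 target and not reproducing the HotSpot build glue:

```cpp
#include <cstdio>

// Equivalent in spirit to _Atomic_move_long: move 8 bytes through the x87
// stack so that the load from *src and the store to *dst are each a single
// 64-bit memory access.
static inline void atomic_move_long(const volatile long long* src,
                                    volatile long long* dst) {
  __asm__ volatile("fildll %1\n\t"   // load 64-bit integer from src
                   "fistpll %0"      // store it to dst and pop the x87 stack
                   : "=m"(*dst)
                   : "m"(*src));
}

int main() {
  volatile long long a = 0x1122334455667788LL;
  volatile long long b = 0;
  atomic_move_long(&a, &b);
  std::printf("%llx\n", (unsigned long long)b);
  return 0;
}
```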

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp" #include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp" #include "vm_version_x86.hpp"
@ -64,11 +65,11 @@ inline void OrderAccess::fence() {
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
@ -79,11 +80,11 @@ inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
@ -178,7 +179,7 @@ inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v)
: "0" (v), "r" (p) : "0" (v), "r" (p)
: "memory"); : "memory");
#else #else
*p = v; fence(); release_store(p, v); fence();
#endif // AMD64 #endif // AMD64
} }
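
The 32-bit path above now performs the jlong store through release_store (and hence Atomic::store) before the trailing fence, rather than a plain assignment that could be split into two 32-bit writes. As a rough portable analogy only, not HotSpot code, the same shape in standard C++:

#include <atomic>
#include <cstdint>

void release_store_fence_sketch(std::atomic<int64_t>& p, int64_t v) {
  p.store(v, std::memory_order_release);                 // single, untearable 64-bit store
  std::atomic_thread_fence(std::memory_order_seq_cst);   // trailing full fence()
}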

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,14 +35,12 @@
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
@ -54,8 +52,49 @@ inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest);
inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); }
#ifdef _LP64
inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline jlong Atomic::load(volatile jlong* src) { return *src; } inline jlong Atomic::load(volatile jlong* src) { return *src; }
#else
extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
#ifdef COMPILER2
// Compiler2 does not support v8; it is used only for v9.
assert (VM_Version::v9_instructions_work(), "only supported on v9");
_Atomic_move_long_v9(src, dst);
#else
// The branch is cheaper than emulated LDD.
if (VM_Version::v9_instructions_work()) {
_Atomic_move_long_v9(src, dst);
} else {
_Atomic_move_long_v8(src, dst);
}
#endif
}
inline jlong Atomic::load(volatile jlong* src) {
volatile jlong dest;
Atomic_move_long(src, &dest);
return dest;
}
inline void Atomic::store(jlong store_value, jlong* dest) {
Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
}
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
Atomic_move_long((volatile jlong*)&store_value, dest);
}
#endif
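
The wrappers above funnel every jlong access through a single move primitive and a stack temporary, so the 64-bit value is only ever touched by the atomic move itself. A self-contained sketch of that shape, using std::atomic as a stand-in for _Atomic_move_long_v8/_v9; all names below are illustrative:

#include <atomic>
#include <cstdint>

// Stand-in for the platform move primitive: one atomic 64-bit copy.
static void move_long_sketch(const std::atomic<int64_t>* src, std::atomic<int64_t>* dst) {
  dst->store(src->load(std::memory_order_relaxed), std::memory_order_relaxed);
}

// load: move into a local temporary, then return it by value.
inline int64_t load_long_sketch(const std::atomic<int64_t>* src) {
  std::atomic<int64_t> dest(0);
  move_long_sketch(src, &dest);
  return dest.load(std::memory_order_relaxed);
}

// store: take the argument's address and move from it into the destination.
inline void store_long_sketch(int64_t value, std::atomic<int64_t>* dst) {
  std::atomic<int64_t> tmp(value);
  move_long_sketch(&tmp, dst);
}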
#ifdef _GNU_SOURCE #ifdef _GNU_SOURCE
inline jint Atomic::add (jint add_value, volatile jint* dest) { inline jint Atomic::add (jint add_value, volatile jint* dest) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -77,11 +77,11 @@ inline void OrderAccess::fence() {
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
@ -92,11 +92,11 @@ inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
@ -120,11 +120,11 @@ inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v;
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

View File

@ -1,5 +1,5 @@
// //
// Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -152,6 +152,39 @@
.nonvolatile .nonvolatile
.end .end
// Support for jlong Atomic::load and Atomic::store on v8.
//
// void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
//
// Arguments:
// src: O0
// dest: O1
//
// Overwrites O2 and O3
.inline _Atomic_move_long_v8,2
.volatile
ldd [%o0], %o2
std %o2, [%o1]
.nonvolatile
.end
// Support for jlong Atomic::load and Atomic::store on v9.
//
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
//
// Arguments:
// src: O0
// dest: O1
//
// Overwrites O2
.inline _Atomic_move_long_v9,2
.volatile
ldx [%o0], %o2
stx %o2, [%o1]
.nonvolatile
.end
// Support for jint Atomic::add(jint add_value, volatile jint* dest). // Support for jint Atomic::add(jint add_value, volatile jint* dest).
// //

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -151,14 +151,22 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
} }
extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst); extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
inline jlong Atomic::load(volatile jlong* src) { inline jlong Atomic::load(volatile jlong* src) {
volatile jlong dest; volatile jlong dest;
_Atomic_load_long(src, &dest); _Atomic_move_long(src, &dest);
return dest; return dest;
} }
inline void Atomic::store(jlong store_value, jlong* dest) {
_Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
}
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
_Atomic_move_long((volatile jlong*)&store_value, dest);
}
#endif // AMD64 #endif // AMD64
#ifdef _GNU_SOURCE #ifdef _GNU_SOURCE

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp" #include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp" #include "vm_version_x86.hpp"
@ -80,11 +81,11 @@ extern "C" {
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
@ -95,11 +96,11 @@ inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
@ -123,11 +124,11 @@ inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v;
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

View File

@ -1,5 +1,5 @@
// //
// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -104,8 +104,9 @@
popl %ebx popl %ebx
.end .end
// Support for void Atomic::load(volatile jlong* src, volatile jlong* dest). // Support for jlong Atomic::load and Atomic::store.
.inline _Atomic_load_long,2 // void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
.inline _Atomic_move_long,2
movl 0(%esp), %eax // src movl 0(%esp), %eax // src
fildll (%eax) fildll (%eax)
movl 4(%esp), %eax // dest movl 4(%esp), %eax // dest

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -137,10 +137,10 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value); return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
} }
inline jlong Atomic::load(volatile jlong* src) { return *src; }
#else // !AMD64 #else // !AMD64
//inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
//inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline jint Atomic::add (jint add_value, volatile jint* dest) { inline jint Atomic::add (jint add_value, volatile jint* dest) {
int mp = os::is_MP(); int mp = os::is_MP();
__asm { __asm {
@ -254,6 +254,33 @@ inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*
inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
} }
inline jlong Atomic::load(volatile jlong* src) {
volatile jlong dest;
volatile jlong* pdest = &dest;
__asm {
mov eax, src
fild qword ptr [eax]
mov eax, pdest
fistp qword ptr [eax]
}
return dest;
}
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
volatile jlong* src = &store_value;
__asm {
mov eax, src
fild qword ptr [eax]
mov eax, dest
fistp qword ptr [eax]
}
}
inline void Atomic::store(jlong store_value, jlong* dest) {
Atomic::store(store_value, (volatile jlong*)dest);
}
#endif // AMD64 #endif // AMD64
#pragma warning(default: 4035) // Enables warnings reporting missing return statement #pragma warning(default: 4035) // Enables warnings reporting missing return statement

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp" #include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp" #include "vm_version_x86.hpp"
@ -65,11 +66,11 @@ inline void OrderAccess::fence() {
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
@ -80,11 +81,11 @@ inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
@ -188,7 +189,7 @@ inline void OrderAccess::release_store_fence(volatile jint* p, jint v) {
#endif // AMD64 #endif // AMD64
} }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { *p = v; fence(); } inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); } inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); } inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -319,24 +319,24 @@ void BlockListBuilder::set_leaders() {
case Bytecodes::_tableswitch: { case Bytecodes::_tableswitch: {
// set block for each case // set block for each case
Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp()); Bytecode_tableswitch sw(&s);
int l = switch_->length(); int l = sw.length();
for (int i = 0; i < l; i++) { for (int i = 0; i < l; i++) {
make_block_at(cur_bci + switch_->dest_offset_at(i), current); make_block_at(cur_bci + sw.dest_offset_at(i), current);
} }
make_block_at(cur_bci + switch_->default_offset(), current); make_block_at(cur_bci + sw.default_offset(), current);
current = NULL; current = NULL;
break; break;
} }
case Bytecodes::_lookupswitch: { case Bytecodes::_lookupswitch: {
// set block for each case // set block for each case
Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp()); Bytecode_lookupswitch sw(&s);
int l = switch_->number_of_pairs(); int l = sw.number_of_pairs();
for (int i = 0; i < l; i++) { for (int i = 0; i < l; i++) {
make_block_at(cur_bci + switch_->pair_at(i)->offset(), current); make_block_at(cur_bci + sw.pair_at(i).offset(), current);
} }
make_block_at(cur_bci + switch_->default_offset(), current); make_block_at(cur_bci + sw.default_offset(), current);
current = NULL; current = NULL;
break; break;
} }
@ -1275,15 +1275,15 @@ void GraphBuilder::ret(int local_index) {
void GraphBuilder::table_switch() { void GraphBuilder::table_switch() {
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci()); Bytecode_tableswitch sw(stream());
const int l = switch_->length(); const int l = sw.length();
if (CanonicalizeNodes && l == 1) { if (CanonicalizeNodes && l == 1) {
// total of 2 successors => use If instead of switch // total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it can // Note: This code should go into the canonicalizer as soon as it can
// handle canonicalized forms that contain more than one node. // handle canonicalized forms that contain more than one node.
Value key = append(new Constant(new IntConstant(switch_->low_key()))); Value key = append(new Constant(new IntConstant(sw.low_key())));
BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0)); BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
BlockBegin* fsux = block_at(bci() + switch_->default_offset()); BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL; ValueStack* state_before = is_bb ? copy_state_before() : NULL;
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
@ -1293,29 +1293,29 @@ void GraphBuilder::table_switch() {
int i; int i;
bool has_bb = false; bool has_bb = false;
for (i = 0; i < l; i++) { for (i = 0; i < l; i++) {
sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i))); sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
if (switch_->dest_offset_at(i) < 0) has_bb = true; if (sw.dest_offset_at(i) < 0) has_bb = true;
} }
// add default successor // add default successor
sux->at_put(i, block_at(bci() + switch_->default_offset())); sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL; ValueStack* state_before = has_bb ? copy_state_before() : NULL;
append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb)); append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
} }
} }
void GraphBuilder::lookup_switch() { void GraphBuilder::lookup_switch() {
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci()); Bytecode_lookupswitch sw(stream());
const int l = switch_->number_of_pairs(); const int l = sw.number_of_pairs();
if (CanonicalizeNodes && l == 1) { if (CanonicalizeNodes && l == 1) {
// total of 2 successors => use If instead of switch // total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it can // Note: This code should go into the canonicalizer as soon as it can
// handle canonicalized forms that contain more than one node. // handle canonicalized forms that contain more than one node.
// simplify to If // simplify to If
LookupswitchPair* pair = switch_->pair_at(0); LookupswitchPair pair = sw.pair_at(0);
Value key = append(new Constant(new IntConstant(pair->match()))); Value key = append(new Constant(new IntConstant(pair.match())));
BlockBegin* tsux = block_at(bci() + pair->offset()); BlockBegin* tsux = block_at(bci() + pair.offset());
BlockBegin* fsux = block_at(bci() + switch_->default_offset()); BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL; ValueStack* state_before = is_bb ? copy_state_before() : NULL;
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
@ -1326,13 +1326,13 @@ void GraphBuilder::lookup_switch() {
int i; int i;
bool has_bb = false; bool has_bb = false;
for (i = 0; i < l; i++) { for (i = 0; i < l; i++) {
LookupswitchPair* pair = switch_->pair_at(i); LookupswitchPair pair = sw.pair_at(i);
if (pair->offset() < 0) has_bb = true; if (pair.offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + pair->offset())); sux->at_put(i, block_at(bci() + pair.offset()));
keys->at_put(i, pair->match()); keys->at_put(i, pair.match());
} }
// add default successor // add default successor
sux->at_put(i, block_at(bci() + switch_->default_offset())); sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL; ValueStack* state_before = has_bb ? copy_state_before() : NULL;
append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
} }
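
Throughout this file, the heap-backed Bytecode_tableswitch*/Bytecode_lookupswitch* wrappers returned by the old _at() factories give way to small value objects constructed straight from the stream. A standalone sketch of the resulting iteration pattern, with purely illustrative types rather than the real C1 classes:

#include <vector>

struct TableSwitchView {                 // stand-in for Bytecode_tableswitch
  int default_offset_;
  std::vector<int> dest_offsets_;
  int length() const { return static_cast<int>(dest_offsets_.size()); }
  int default_offset() const { return default_offset_; }
  int dest_offset_at(int i) const { return dest_offsets_[i]; }
};

// Collect successor bcis the way table_switch()/set_leaders() do above:
// one per case, then the default target.
std::vector<int> successor_bcis(int cur_bci, const TableSwitchView& sw) {
  std::vector<int> out;
  for (int i = 0; i < sw.length(); i++) {
    out.push_back(cur_bci + sw.dest_offset_at(i));
  }
  out.push_back(cur_bci + sw.default_offset());
  return out;
}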

View File

@ -1990,9 +1990,8 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
LIR_Opr reg = rlock_result(x, x->basic_type()); LIR_Opr reg = rlock_result(x, x->basic_type());
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile()); get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
if (x->is_volatile() && os::is_MP()) __ membar(); if (x->is_volatile() && os::is_MP()) __ membar_acquire();
} }
@ -2014,6 +2013,7 @@ void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
if (x->is_volatile() && os::is_MP()) __ membar_release(); if (x->is_volatile() && os::is_MP()) __ membar_release();
put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile()); put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
if (x->is_volatile() && os::is_MP()) __ membar();
} }
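
The two hunks above move the acquire barrier for a volatile Unsafe get to after the load and add a trailing full barrier after a volatile Unsafe put. As an illustrative analogy in standard C++ (not the LIR code itself), the intended ordering is:

#include <atomic>
#include <cstdint>

int64_t volatile_get_sketch(const std::atomic<int64_t>& field) {
  int64_t v = field.load(std::memory_order_relaxed);      // the access itself
  std::atomic_thread_fence(std::memory_order_acquire);    // membar_acquire after the load
  return v;
}

void volatile_put_sketch(std::atomic<int64_t>& field, int64_t v) {
  std::atomic_thread_fence(std::memory_order_release);    // membar_release before the store
  field.store(v, std::memory_order_relaxed);               // the access itself
  std::atomic_thread_fence(std::memory_order_seq_cst);     // trailing full membar()
}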

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -369,7 +369,7 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
if (branch_bci != InvocationEntryBci) { if (branch_bci != InvocationEntryBci) {
// Compute destination bci // Compute destination bci
address pc = method()->code_base() + branch_bci; address pc = method()->code_base() + branch_bci;
Bytecodes::Code branch = Bytecodes::code_at(pc, method()); Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
int offset = 0; int offset = 0;
switch (branch) { switch (branch) {
case Bytecodes::_if_icmplt: case Bytecodes::_iflt: case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
@ -659,14 +659,14 @@ JRT_END
static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
Bytecode_field* field_access = Bytecode_field_at(caller, bci); Bytecode_field field_access(caller, bci);
// This can be static or non-static field access // This can be static or non-static field access
Bytecodes::Code code = field_access->code(); Bytecodes::Code code = field_access.code();
// We must load class, initialize class and resolve the field // We must load class, initialize class and resolve the field
FieldAccessInfo result; // initialize class if needed FieldAccessInfo result; // initialize class if needed
constantPoolHandle constants(THREAD, caller->constants()); constantPoolHandle constants(THREAD, caller->constants());
LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK_NULL); LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
return result.klass()(); return result.klass()();
} }
@ -767,7 +767,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc()); Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
Bytecodes::Code code = Bytecode_at(caller_method->bcp_from(bci))->java_code(); Bytecodes::Code code = caller_method()->java_code_at(bci);
#ifndef PRODUCT #ifndef PRODUCT
// this is used by assertions in the access_field_patching_id // this is used by assertions in the access_field_patching_id
@ -779,11 +779,11 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code
if (stub_id == Runtime1::access_field_patching_id) { if (stub_id == Runtime1::access_field_patching_id) {
Bytecode_field* field_access = Bytecode_field_at(caller_method, bci); Bytecode_field field_access(caller_method, bci);
FieldAccessInfo result; // initialize class if needed FieldAccessInfo result; // initialize class if needed
Bytecodes::Code code = field_access->code(); Bytecodes::Code code = field_access.code();
constantPoolHandle constants(THREAD, caller_method->constants()); constantPoolHandle constants(THREAD, caller_method->constants());
LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK); LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
patch_field_offset = result.field_offset(); patch_field_offset = result.field_offset();
// If we're patching a field which is volatile then at compile it // If we're patching a field which is volatile then at compile it
@ -811,36 +811,36 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
} }
break; break;
case Bytecodes::_new: case Bytecodes::_new:
{ Bytecode_new* bnew = Bytecode_new_at(caller_method->bcp_from(bci)); { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
k = caller_method->constants()->klass_at(bnew->index(), CHECK); k = caller_method->constants()->klass_at(bnew.index(), CHECK);
} }
break; break;
case Bytecodes::_multianewarray: case Bytecodes::_multianewarray:
{ Bytecode_multianewarray* mna = Bytecode_multianewarray_at(caller_method->bcp_from(bci)); { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
k = caller_method->constants()->klass_at(mna->index(), CHECK); k = caller_method->constants()->klass_at(mna.index(), CHECK);
} }
break; break;
case Bytecodes::_instanceof: case Bytecodes::_instanceof:
{ Bytecode_instanceof* io = Bytecode_instanceof_at(caller_method->bcp_from(bci)); { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
k = caller_method->constants()->klass_at(io->index(), CHECK); k = caller_method->constants()->klass_at(io.index(), CHECK);
} }
break; break;
case Bytecodes::_checkcast: case Bytecodes::_checkcast:
{ Bytecode_checkcast* cc = Bytecode_checkcast_at(caller_method->bcp_from(bci)); { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
k = caller_method->constants()->klass_at(cc->index(), CHECK); k = caller_method->constants()->klass_at(cc.index(), CHECK);
} }
break; break;
case Bytecodes::_anewarray: case Bytecodes::_anewarray:
{ Bytecode_anewarray* anew = Bytecode_anewarray_at(caller_method->bcp_from(bci)); { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
klassOop ek = caller_method->constants()->klass_at(anew->index(), CHECK); klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK);
k = Klass::cast(ek)->array_klass(CHECK); k = Klass::cast(ek)->array_klass(CHECK);
} }
break; break;
case Bytecodes::_ldc: case Bytecodes::_ldc:
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
{ {
Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci); Bytecode_loadconstant cc(caller_method, bci);
k = cc->resolve_constant(CHECK); k = cc.resolve_constant(CHECK);
assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
} }
break; break;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -761,15 +761,15 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_tableswitch: case Bytecodes::_tableswitch:
{ {
state.spop(); state.spop();
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp()); Bytecode_tableswitch sw(&s);
int len = switch_->length(); int len = sw.length();
int dest_bci; int dest_bci;
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
dest_bci = s.cur_bci() + switch_->dest_offset_at(i); dest_bci = s.cur_bci() + sw.dest_offset_at(i);
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block"); assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
successors.push(_methodBlocks->block_containing(dest_bci)); successors.push(_methodBlocks->block_containing(dest_bci));
} }
dest_bci = s.cur_bci() + switch_->default_offset(); dest_bci = s.cur_bci() + sw.default_offset();
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block"); assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
successors.push(_methodBlocks->block_containing(dest_bci)); successors.push(_methodBlocks->block_containing(dest_bci));
assert(s.next_bci() == limit_bci, "branch must end block"); assert(s.next_bci() == limit_bci, "branch must end block");
@ -779,15 +779,15 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_lookupswitch: case Bytecodes::_lookupswitch:
{ {
state.spop(); state.spop();
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp()); Bytecode_lookupswitch sw(&s);
int len = switch_->number_of_pairs(); int len = sw.number_of_pairs();
int dest_bci; int dest_bci;
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
dest_bci = s.cur_bci() + switch_->pair_at(i)->offset(); dest_bci = s.cur_bci() + sw.pair_at(i).offset();
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block"); assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
successors.push(_methodBlocks->block_containing(dest_bci)); successors.push(_methodBlocks->block_containing(dest_bci));
} }
dest_bci = s.cur_bci() + switch_->default_offset(); dest_bci = s.cur_bci() + sw.default_offset();
assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block"); assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
successors.push(_methodBlocks->block_containing(dest_bci)); successors.push(_methodBlocks->block_containing(dest_bci));
fall_through = false; fall_through = false;

View File

@ -409,15 +409,15 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
} else { } else {
fail_type = _unloaded_ciinstance_klass; fail_type = _unloaded_ciinstance_klass;
} }
klassOop found_klass; KlassHandle found_klass;
if (!require_local) { if (!require_local) {
found_klass = klassOop kls = SystemDictionary::find_constrained_instance_or_array_klass(
SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, sym, loader, KILL_COMPILE_ON_FATAL_(fail_type));
KILL_COMPILE_ON_FATAL_(fail_type)); found_klass = KlassHandle(THREAD, kls);
} else { } else {
found_klass = klassOop kls = SystemDictionary::find_instance_or_array_klass(
SystemDictionary::find_instance_or_array_klass(sym, loader, domain, sym, loader, domain, KILL_COMPILE_ON_FATAL_(fail_type));
KILL_COMPILE_ON_FATAL_(fail_type)); found_klass = KlassHandle(THREAD, kls);
} }
// If we fail to find an array klass, look again for its element type. // If we fail to find an array klass, look again for its element type.
@ -444,9 +444,9 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
} }
} }
if (found_klass != NULL) { if (found_klass() != NULL) {
// Found it. Build a CI handle. // Found it. Build a CI handle.
return get_object(found_klass)->as_klass(); return get_object(found_klass())->as_klass();
} }
if (require_local) return NULL; if (require_local) return NULL;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -144,7 +144,7 @@ class ciMethod : public ciObject {
Bytecodes::Code java_code_at_bci(int bci) { Bytecodes::Code java_code_at_bci(int bci) {
address bcp = code() + bci; address bcp = code() + bci;
return Bytecodes::java_code_at(bcp); return Bytecodes::java_code_at(NULL, bcp);
} }
BCEscapeAnalyzer *get_bcea(); BCEscapeAnalyzer *get_bcea();
ciMethodBlocks *get_method_blocks(); ciMethodBlocks *get_method_blocks();

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -175,15 +175,15 @@ void ciMethodBlocks::do_analysis() {
case Bytecodes::_tableswitch : case Bytecodes::_tableswitch :
{ {
cur_block->set_control_bci(bci); cur_block->set_control_bci(bci);
Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp()); Bytecode_tableswitch sw(&s);
int len = switch_->length(); int len = sw.length();
ciBlock *dest; ciBlock *dest;
int dest_bci; int dest_bci;
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
dest_bci = s.cur_bci() + switch_->dest_offset_at(i); dest_bci = s.cur_bci() + sw.dest_offset_at(i);
dest = make_block_at(dest_bci); dest = make_block_at(dest_bci);
} }
dest_bci = s.cur_bci() + switch_->default_offset(); dest_bci = s.cur_bci() + sw.default_offset();
make_block_at(dest_bci); make_block_at(dest_bci);
if (s.next_bci() < limit_bci) { if (s.next_bci() < limit_bci) {
dest = make_block_at(s.next_bci()); dest = make_block_at(s.next_bci());
@ -194,15 +194,15 @@ void ciMethodBlocks::do_analysis() {
case Bytecodes::_lookupswitch: case Bytecodes::_lookupswitch:
{ {
cur_block->set_control_bci(bci); cur_block->set_control_bci(bci);
Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp()); Bytecode_lookupswitch sw(&s);
int len = switch_->number_of_pairs(); int len = sw.number_of_pairs();
ciBlock *dest; ciBlock *dest;
int dest_bci; int dest_bci;
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
dest_bci = s.cur_bci() + switch_->pair_at(i)->offset(); dest_bci = s.cur_bci() + sw.pair_at(i).offset();
dest = make_block_at(dest_bci); dest = make_block_at(dest_bci);
} }
dest_bci = s.cur_bci() + switch_->default_offset(); dest_bci = s.cur_bci() + sw.default_offset();
dest = make_block_at(dest_bci); dest = make_block_at(dest_bci);
if (s.next_bci() < limit_bci) { if (s.next_bci() < limit_bci) {
dest = make_block_at(s.next_bci()); dest = make_block_at(s.next_bci());

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,11 +38,12 @@
// Return an adapter for this MethodHandle. // Return an adapter for this MethodHandle.
ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
VM_ENTRY_MARK; VM_ENTRY_MARK;
Handle h(get_oop()); Handle h(get_oop());
methodHandle callee(_callee->get_methodOop()); methodHandle callee(_callee->get_methodOop());
MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD); // We catch all exceptions here that could happen in the method
methodHandle m = mhc.compile(CHECK_NULL); // handle compiler and stop the VM.
MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH);
methodHandle m = mhc.compile(CATCH);
return CURRENT_ENV->get_object(m())->as_method(); return CURRENT_ENV->get_object(m())->as_method();
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -78,8 +78,8 @@ private:
else { assert(!is_wide(), "must not be a wide instruction"); } else { assert(!is_wide(), "must not be a wide instruction"); }
} }
Bytecode* bytecode() const { return Bytecode_at(_bc_start); } Bytecode bytecode() const { return Bytecode(this, _bc_start); }
Bytecode* next_bytecode() const { return Bytecode_at(_pc); } Bytecode next_bytecode() const { return Bytecode(this, _pc); }
public: public:
// End-Of-Bytecodes // End-Of-Bytecodes
@ -151,11 +151,11 @@ public:
bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
int get_index_u1() const { int get_index_u1() const {
return bytecode()->get_index_u1(cur_bc_raw()); return bytecode().get_index_u1(cur_bc_raw());
} }
int get_index_u1_cpcache() const { int get_index_u1_cpcache() const {
return bytecode()->get_index_u1_cpcache(cur_bc_raw()); return bytecode().get_index_u1_cpcache(cur_bc_raw());
} }
// Get a byte index following this bytecode. // Get a byte index following this bytecode.
@ -169,29 +169,29 @@ public:
// Get 2-byte index (byte swapping depending on which bytecode) // Get 2-byte index (byte swapping depending on which bytecode)
int get_index_u2(bool is_wide = false) const { int get_index_u2(bool is_wide = false) const {
return bytecode()->get_index_u2(cur_bc_raw(), is_wide); return bytecode().get_index_u2(cur_bc_raw(), is_wide);
} }
// Get 2-byte index in native byte order. (Rewriter::rewrite makes these.) // Get 2-byte index in native byte order. (Rewriter::rewrite makes these.)
int get_index_u2_cpcache() const { int get_index_u2_cpcache() const {
return bytecode()->get_index_u2_cpcache(cur_bc_raw()); return bytecode().get_index_u2_cpcache(cur_bc_raw());
} }
// Get 4-byte index, for invokedynamic. // Get 4-byte index, for invokedynamic.
int get_index_u4() const { int get_index_u4() const {
return bytecode()->get_index_u4(cur_bc_raw()); return bytecode().get_index_u4(cur_bc_raw());
} }
bool has_index_u4() const { bool has_index_u4() const {
return bytecode()->has_index_u4(cur_bc_raw()); return bytecode().has_index_u4(cur_bc_raw());
} }
// Get dimensions byte (multinewarray) // Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); } int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Sign-extended index byte/short, no widening // Sign-extended index byte/short, no widening
int get_constant_u1() const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); } int get_constant_u1() const { return bytecode().get_constant_u1(instruction_size()-1, cur_bc_raw()); }
int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); } int get_constant_u2(bool is_wide = false) const { return bytecode().get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
// Get a byte signed constant for "iinc". Invalid for other bytecodes. // Get a byte signed constant for "iinc". Invalid for other bytecodes.
// If prefixed with a wide bytecode, get a wide constant // If prefixed with a wide bytecode, get a wide constant
@ -199,18 +199,18 @@ public:
// 2-byte branch offset from current pc // 2-byte branch offset from current pc
int get_dest() const { int get_dest() const {
return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw()); return cur_bci() + bytecode().get_offset_s2(cur_bc_raw());
} }
// 2-byte branch offset from next pc // 2-byte branch offset from next pc
int next_get_dest() const { int next_get_dest() const {
assert(_pc < _end, ""); assert(_pc < _end, "");
return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq); return next_bci() + next_bytecode().get_offset_s2(Bytecodes::_ifeq);
} }
// 4-byte branch offset from current pc // 4-byte branch offset from current pc
int get_far_dest() const { int get_far_dest() const {
return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw()); return cur_bci() + bytecode().get_offset_s4(cur_bc_raw());
} }
// For a lookup or switch table, return target destination // For a lookup or switch table, return target destination
@ -407,4 +407,11 @@ public:
} }
}; };
// Implementation for declarations in bytecode.hpp
Bytecode::Bytecode(const ciBytecodeStream* stream, address bcp): _bcp(bcp != NULL ? bcp : stream->cur_bcp()), _code(Bytecodes::code_at(NULL, addr_at(0))) {}
Bytecode_lookupswitch::Bytecode_lookupswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
Bytecode_tableswitch::Bytecode_tableswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
#endif // SHARE_VM_CI_CISTREAMS_HPP #endif // SHARE_VM_CI_CISTREAMS_HPP
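The ciBytecodeStream accessors above now hand back Bytecode objects by value instead of through Bytecode* pointers, so callers use member access on a small value object and need no separate lifetime management. The sketch below is not HotSpot code; RawStream and Insn are hypothetical stand-ins that only illustrate the cheap value-wrapper-around-a-raw-pointer pattern the change adopts.

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for the real stream/bytecode types.
struct RawStream {
  const uint8_t* pos;                  // current instruction pointer
};

class Insn {                           // value wrapper, analogous in spirit to Bytecode
  const uint8_t* _bcp;                 // raw pointer into the code array
public:
  explicit Insn(const RawStream& s) : _bcp(s.pos) {}
  uint8_t  opcode()   const { return _bcp[0]; }
  uint16_t index_u2() const {          // big-endian 2-byte operand
    return static_cast<uint16_t>((_bcp[1] << 8) | _bcp[2]);
  }
};

int main() {
  const uint8_t code[] = { 0xB6 /* invokevirtual */, 0x00, 0x2A };
  RawStream s{code};
  Insn insn(s);                        // constructed by value: no new, no delete
  assert(insn.opcode() == 0xB6);
  assert(insn.index_u2() == 0x2A);
  return 0;
}

Because such wrappers are copyable and pointer-sized, accessors like bytecode() and next_bytecode() can return them without allocation, which is what the updated call sites rely on.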

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1698,18 +1698,17 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
break; break;
case Bytecodes::_tableswitch: { case Bytecodes::_tableswitch: {
Bytecode_tableswitch *tableswitch = Bytecode_tableswitch tableswitch(str);
Bytecode_tableswitch_at(str->cur_bcp());
int len = tableswitch->length(); int len = tableswitch.length();
_successors = _successors =
new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL); new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL);
int bci = current_bci + tableswitch->default_offset(); int bci = current_bci + tableswitch.default_offset();
Block* block = analyzer->block_at(bci, jsrs); Block* block = analyzer->block_at(bci, jsrs);
assert(_successors->length() == SWITCH_DEFAULT, ""); assert(_successors->length() == SWITCH_DEFAULT, "");
_successors->append(block); _successors->append(block);
while (--len >= 0) { while (--len >= 0) {
int bci = current_bci + tableswitch->dest_offset_at(len); int bci = current_bci + tableswitch.dest_offset_at(len);
block = analyzer->block_at(bci, jsrs); block = analyzer->block_at(bci, jsrs);
assert(_successors->length() >= SWITCH_CASES, ""); assert(_successors->length() >= SWITCH_CASES, "");
_successors->append_if_missing(block); _successors->append_if_missing(block);
@ -1718,19 +1717,18 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
} }
case Bytecodes::_lookupswitch: { case Bytecodes::_lookupswitch: {
Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch lookupswitch(str);
Bytecode_lookupswitch_at(str->cur_bcp());
int npairs = lookupswitch->number_of_pairs(); int npairs = lookupswitch.number_of_pairs();
_successors = _successors =
new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL); new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL);
int bci = current_bci + lookupswitch->default_offset(); int bci = current_bci + lookupswitch.default_offset();
Block* block = analyzer->block_at(bci, jsrs); Block* block = analyzer->block_at(bci, jsrs);
assert(_successors->length() == SWITCH_DEFAULT, ""); assert(_successors->length() == SWITCH_DEFAULT, "");
_successors->append(block); _successors->append(block);
while(--npairs >= 0) { while(--npairs >= 0) {
LookupswitchPair *pair = lookupswitch->pair_at(npairs); LookupswitchPair pair = lookupswitch.pair_at(npairs);
int bci = current_bci + pair->offset(); int bci = current_bci + pair.offset();
Block* block = analyzer->block_at(bci, jsrs); Block* block = analyzer->block_at(bci, jsrs);
assert(_successors->length() >= SWITCH_CASES, ""); assert(_successors->length() >= SWITCH_CASES, "");
_successors->append_if_missing(block); _successors->append_if_missing(block);

View File

@ -1382,3 +1382,61 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
} }
#endif //PRODUCT #endif //PRODUCT
// Please keep the following two functions at the end of this file. With them placed at the top or in the middle of the file,
// they could get inlined by an aggressive compiler via some unknown trick; see bug 6966589.
void PerfClassTraceTime::initialize() {
if (!UsePerfData) return;
if (_eventp != NULL) {
// increment the event counter
_eventp->inc();
}
// stop the current active thread-local timer to measure inclusive time
_prev_active_event = -1;
for (int i=0; i < EVENT_TYPE_COUNT; i++) {
if (_timers[i].is_active()) {
assert(_prev_active_event == -1, "should have only one active timer");
_prev_active_event = i;
_timers[i].stop();
}
}
if (_recursion_counters == NULL || (_recursion_counters[_event_type])++ == 0) {
// start the inclusive timer if not recursively called
_t.start();
}
// start thread-local timer of the given event type
if (!_timers[_event_type].is_active()) {
_timers[_event_type].start();
}
}
PerfClassTraceTime::~PerfClassTraceTime() {
if (!UsePerfData) return;
// stop the thread-local timer as the event completes
// and resume the thread-local timer of the event next on the stack
_timers[_event_type].stop();
jlong selftime = _timers[_event_type].ticks();
if (_prev_active_event >= 0) {
_timers[_prev_active_event].start();
}
if (_recursion_counters != NULL && --(_recursion_counters[_event_type]) > 0) return;
// increment the counters only on the leaf call
_t.stop();
_timep->inc(_t.ticks());
if (_selftimep != NULL) {
_selftimep->inc(selftime);
}
// add all class loading related event selftime to the accumulated time counter
ClassLoader::perf_accumulated_time()->inc(selftime);
// reset the timer
_timers[_event_type].reset();
}
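PerfClassTraceTime is used as a scoped (RAII) timer: construction bumps the event counter and starts the clock, destruction folds the elapsed time into the perf counters, and the per-thread recursion counters ensure only the outermost instance of a given event type contributes inclusive time. The sketch below is not the HotSpot class; ScopedEventTimer and its globals are hypothetical and only mirror that recursion-aware scoped-timing pattern.

#include <chrono>
#include <cstdio>

// Hypothetical scoped timer mirroring the PerfClassTraceTime usage pattern.
class ScopedEventTimer {
  using clock = std::chrono::steady_clock;
  long long&        _total_ns;   // accumulated inclusive time for this event type
  int&              _recursion;  // recursion depth for this event type
  clock::time_point _start;
public:
  ScopedEventTimer(long long& total_ns, int& recursion)
      : _total_ns(total_ns), _recursion(recursion), _start(clock::now()) {
    ++_recursion;                // nested scopes are counted but not re-timed
  }
  ~ScopedEventTimer() {
    if (--_recursion == 0) {     // only the outermost scope accumulates
      _total_ns += std::chrono::duration_cast<std::chrono::nanoseconds>(
                       clock::now() - _start).count();
    }
  }
};

static long long g_class_load_time_ns = 0;
static int       g_class_load_depth   = 0;

static void load_class(int depth) {
  ScopedEventTimer t(g_class_load_time_ns, g_class_load_depth);
  if (depth > 0) load_class(depth - 1);  // recursive call: timed only once
}

int main() {
  load_class(3);
  std::printf("class-load time: %lld ns\n", g_class_load_time_ns);
  return 0;
}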

View File

@ -356,111 +356,57 @@ class ClassLoader: AllStatic {
// (i.e. only one event type) are active at a time even if multiple PerfClassTraceTime // (i.e. only one event type) are active at a time even if multiple PerfClassTraceTime
// instances have been created as multiple events are happening. // instances have been created as multiple events are happening.
class PerfClassTraceTime { class PerfClassTraceTime {
public: public:
enum { enum {
CLASS_LOAD = 0, CLASS_LOAD = 0,
PARSE_CLASS = 1, PARSE_CLASS = 1,
CLASS_LINK = 2, CLASS_LINK = 2,
CLASS_VERIFY = 3, CLASS_VERIFY = 3,
CLASS_CLINIT = 4, CLASS_CLINIT = 4,
DEFINE_CLASS = 5, DEFINE_CLASS = 5,
EVENT_TYPE_COUNT = 6 EVENT_TYPE_COUNT = 6
}; };
protected: protected:
// _t tracks time from initialization to destruction of this timer instance // _t tracks time from initialization to destruction of this timer instance
// including time for all other event types, and recursive calls of this type. // including time for all other event types, and recursive calls of this type.
// When a timer is called recursively, the elapsedTimer _t would not be used. // When a timer is called recursively, the elapsedTimer _t would not be used.
elapsedTimer _t; elapsedTimer _t;
PerfLongCounter* _timep; PerfLongCounter* _timep;
PerfLongCounter* _selftimep; PerfLongCounter* _selftimep;
PerfLongCounter* _eventp; PerfLongCounter* _eventp;
// pointer to thread-local recursion counter and timer array // pointer to thread-local recursion counter and timer array
// The thread_local timers track cumulative time for specific event types // The thread_local timers track cumulative time for specific event types
// exclusive of time for other event types, but including recursive calls // exclusive of time for other event types, but including recursive calls
// of the same type. // of the same type.
int* _recursion_counters; int* _recursion_counters;
elapsedTimer* _timers; elapsedTimer* _timers;
int _event_type; int _event_type;
int _prev_active_event; int _prev_active_event;
public: public:
inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */ inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */
PerfLongCounter* selftimep, /* counter incremented with exclusive time */ PerfLongCounter* selftimep, /* counter incremented with exclusive time */
PerfLongCounter* eventp, /* event counter */ PerfLongCounter* eventp, /* event counter */
int* recursion_counters, /* thread-local recursion counter array */ int* recursion_counters, /* thread-local recursion counter array */
elapsedTimer* timers, /* thread-local timer array */ elapsedTimer* timers, /* thread-local timer array */
int type /* event type */ ) : int type /* event type */ ) :
_timep(timep), _selftimep(selftimep), _eventp(eventp), _recursion_counters(recursion_counters), _timers(timers), _event_type(type) { _timep(timep), _selftimep(selftimep), _eventp(eventp), _recursion_counters(recursion_counters), _timers(timers), _event_type(type) {
initialize(); initialize();
} }
inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */ inline PerfClassTraceTime(PerfLongCounter* timep, /* counter incremented with inclusive time */
elapsedTimer* timers, /* thread-local timer array */ elapsedTimer* timers, /* thread-local timer array */
int type /* event type */ ) : int type /* event type */ ) :
_timep(timep), _selftimep(NULL), _eventp(NULL), _recursion_counters(NULL), _timers(timers), _event_type(type) { _timep(timep), _selftimep(NULL), _eventp(NULL), _recursion_counters(NULL), _timers(timers), _event_type(type) {
initialize(); initialize();
} }
void initialize() { inline void suspend() { _t.stop(); _timers[_event_type].stop(); }
if (!UsePerfData) return; inline void resume() { _t.start(); _timers[_event_type].start(); }
if (_eventp != NULL) { ~PerfClassTraceTime();
// increment the event counter void initialize();
_eventp->inc();
}
// stop the current active thread-local timer to measure inclusive time
_prev_active_event = -1;
for (int i=0; i < EVENT_TYPE_COUNT; i++) {
if (_timers[i].is_active()) {
assert(_prev_active_event == -1, "should have only one active timer");
_prev_active_event = i;
_timers[i].stop();
}
}
if (_recursion_counters == NULL || (_recursion_counters[_event_type])++ == 0) {
// start the inclusive timer if not recursively called
_t.start();
}
// start thread-local timer of the given event type
if (!_timers[_event_type].is_active()) {
_timers[_event_type].start();
}
}
inline void suspend() { _t.stop(); _timers[_event_type].stop(); }
inline void resume() { _t.start(); _timers[_event_type].start(); }
~PerfClassTraceTime() {
if (!UsePerfData) return;
// stop the thread-local timer as the event completes
// and resume the thread-local timer of the event next on the stack
_timers[_event_type].stop();
jlong selftime = _timers[_event_type].ticks();
if (_prev_active_event >= 0) {
_timers[_prev_active_event].start();
}
if (_recursion_counters != NULL && --(_recursion_counters[_event_type]) > 0) return;
// increment the counters only on the leaf call
_t.stop();
_timep->inc(_t.ticks());
if (_selftimep != NULL) {
_selftimep->inc(selftime);
}
// add all class loading related event selftime to the accumulated time counter
ClassLoader::perf_accumulated_time()->inc(selftime);
// reset the timer
_timers[_event_type].reset();
}
}; };
#endif // SHARE_VM_CLASSFILE_CLASSLOADER_HPP #endif // SHARE_VM_CLASSFILE_CLASSLOADER_HPP

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -172,6 +172,8 @@ class SymbolPropertyTable;
\ \
template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \ template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
\ \
template(sun_misc_PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt) \
\
/* Preload boxing klasses */ \ /* Preload boxing klasses */ \
template(Boolean_klass, java_lang_Boolean, Pre) \ template(Boolean_klass, java_lang_Boolean, Pre) \
template(Character_klass, java_lang_Character, Pre) \ template(Character_klass, java_lang_Character, Pre) \

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -111,6 +111,7 @@
template(sun_jkernel_DownloadManager, "sun/jkernel/DownloadManager") \ template(sun_jkernel_DownloadManager, "sun/jkernel/DownloadManager") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \ template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(setBootClassLoaderHook_name, "setBootClassLoaderHook") \ template(setBootClassLoaderHook_name, "setBootClassLoaderHook") \
template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
\ \
/* class file format tags */ \ /* class file format tags */ \
template(tag_source_file, "SourceFile") \ template(tag_source_file, "SourceFile") \

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1863,9 +1863,9 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
#ifndef SHARK #ifndef SHARK
if (!method()->is_native()) { if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc()); SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci()); Bytecode_invoke call(ssd.method(), ssd.bci());
bool has_receiver = call->has_receiver(); bool has_receiver = call.has_receiver();
symbolOop signature = call->signature(); symbolOop signature = call.signature();
fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f); fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
} }
#endif // !SHARK #endif // !SHARK
@ -2698,8 +2698,7 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
} else if (sd->method()->is_native()) { } else if (sd->method()->is_native()) {
st->print("method is native"); st->print("method is native");
} else { } else {
address bcp = sd->method()->bcp_from(sd->bci()); Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
Bytecodes::Code bc = Bytecodes::java_code_at(bcp);
st->print(";*%s", Bytecodes::name(bc)); st->print(";*%s", Bytecodes::name(bc));
switch (bc) { switch (bc) {
case Bytecodes::_invokevirtual: case Bytecodes::_invokevirtual:
@ -2707,10 +2706,10 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface: case Bytecodes::_invokeinterface:
{ {
Bytecode_invoke* invoke = Bytecode_invoke_at(sd->method(), sd->bci()); Bytecode_invoke invoke(sd->method(), sd->bci());
st->print(" "); st->print(" ");
if (invoke->name() != NULL) if (invoke.name() != NULL)
invoke->name()->print_symbol_on(st); invoke.name()->print_symbol_on(st);
else else
st->print("<UNKNOWN>"); st->print("<UNKNOWN>");
break; break;
@ -2720,10 +2719,10 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
case Bytecodes::_getstatic: case Bytecodes::_getstatic:
case Bytecodes::_putstatic: case Bytecodes::_putstatic:
{ {
Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci()); Bytecode_field field(sd->method(), sd->bci());
st->print(" "); st->print(" ");
if (field->name() != NULL) if (field.name() != NULL)
field->name()->print_symbol_on(st); field.name()->print_symbol_on(st);
else else
st->print("<UNKNOWN>"); st->print("<UNKNOWN>");
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -286,16 +286,15 @@ void MethodLiveness::init_basic_blocks() {
break; break;
case Bytecodes::_tableswitch: case Bytecodes::_tableswitch:
{ {
Bytecode_tableswitch *tableswitch = Bytecode_tableswitch tableswitch(&bytes);
Bytecode_tableswitch_at(bytes.cur_bcp());
int len = tableswitch->length(); int len = tableswitch.length();
dest = _block_map->at(bci + tableswitch->default_offset()); dest = _block_map->at(bci + tableswitch.default_offset());
assert(dest != NULL, "branch desination must start a block."); assert(dest != NULL, "branch desination must start a block.");
dest->add_normal_predecessor(current_block); dest->add_normal_predecessor(current_block);
while (--len >= 0) { while (--len >= 0) {
dest = _block_map->at(bci + tableswitch->dest_offset_at(len)); dest = _block_map->at(bci + tableswitch.dest_offset_at(len));
assert(dest != NULL, "branch desination must start a block."); assert(dest != NULL, "branch desination must start a block.");
dest->add_normal_predecessor(current_block); dest->add_normal_predecessor(current_block);
} }
@ -304,17 +303,16 @@ void MethodLiveness::init_basic_blocks() {
case Bytecodes::_lookupswitch: case Bytecodes::_lookupswitch:
{ {
Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch lookupswitch(&bytes);
Bytecode_lookupswitch_at(bytes.cur_bcp());
int npairs = lookupswitch->number_of_pairs(); int npairs = lookupswitch.number_of_pairs();
dest = _block_map->at(bci + lookupswitch->default_offset()); dest = _block_map->at(bci + lookupswitch.default_offset());
assert(dest != NULL, "branch desination must start a block."); assert(dest != NULL, "branch desination must start a block.");
dest->add_normal_predecessor(current_block); dest->add_normal_predecessor(current_block);
while(--npairs >= 0) { while(--npairs >= 0) {
LookupswitchPair *pair = lookupswitch->pair_at(npairs); LookupswitchPair pair = lookupswitch.pair_at(npairs);
dest = _block_map->at( bci + pair->offset()); dest = _block_map->at( bci + pair.offset());
assert(dest != NULL, "branch desination must start a block."); assert(dest != NULL, "branch desination must start a block.");
dest->add_normal_predecessor(current_block); dest->add_normal_predecessor(current_block);
} }

View File

@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
assert(_collectorState == InitialMarking, "Wrong collector state"); assert(_collectorState == InitialMarking, "Wrong collector state");
check_correct_thread_executing(); check_correct_thread_executing();
TraceCMSMemoryManagerStats tms(_collectorState); TraceCMSMemoryManagerStats tms(_collectorState);
ReferenceProcessor* rp = ref_processor(); ReferenceProcessor* rp = ref_processor();
SpecializationStats::clear(); SpecializationStats::clear();
assert(_restart_addr == NULL, "Control point invariant"); assert(_restart_addr == NULL, "Control point invariant");
@ -4978,6 +4979,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
if (should_unload_classes()) { if (should_unload_classes()) {
CodeCache::gc_epilogue(); CodeCache::gc_epilogue();
} }
JvmtiExport::gc_epilogue();
// If we encountered any (marking stack / work queue) overflow // If we encountered any (marking stack / work queue) overflow
// events during the current CMS cycle, take appropriate // events during the current CMS cycle, take appropriate
@ -5940,11 +5942,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
} }
rp->verify_no_references_recorded(); rp->verify_no_references_recorded();
assert(!rp->discovery_enabled(), "should have been disabled"); assert(!rp->discovery_enabled(), "should have been disabled");
// JVMTI object tagging is based on JNI weak refs. If any of these
// refs were cleared then JVMTI needs to update its maps and
// maybe post ObjectFrees to agents.
JvmtiExport::cms_ref_processing_epilogue();
} }
#ifndef PRODUCT #ifndef PRODUCT
@ -6305,6 +6302,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
switch (op) { switch (op) {
case CMS_op_checkpointRootsInitial: { case CMS_op_checkpointRootsInitial: {
SvcGCMarker sgcm(SvcGCMarker::OTHER);
checkpointRootsInitial(true); // asynch checkpointRootsInitial(true); // asynch
if (PrintGC) { if (PrintGC) {
_cmsGen->printOccupancy("initial-mark"); _cmsGen->printOccupancy("initial-mark");
@ -6312,6 +6310,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
break; break;
} }
case CMS_op_checkpointRootsFinal: { case CMS_op_checkpointRootsFinal: {
SvcGCMarker sgcm(SvcGCMarker::OTHER);
checkpointRootsFinal(true, // asynch checkpointRootsFinal(true, // asynch
false, // !clear_all_soft_refs false, // !clear_all_soft_refs
false); // !init_mark_was_synchronous false); // !init_mark_was_synchronous
@ -7881,25 +7880,23 @@ SweepClosure::SweepClosure(CMSCollector* collector,
} }
// We need this destructor to reclaim any space at the end // We need this destructor to reclaim any space at the end
// of the space, which do_blk below may not have added back to // of the space, which do_blk below may not yet have added back to
// the free lists. [basically dealing with the "fringe effect"] // the free lists.
SweepClosure::~SweepClosure() { SweepClosure::~SweepClosure() {
assert_lock_strong(_freelistLock); assert_lock_strong(_freelistLock);
// this should be treated as the end of a free run if any assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
// The current free range should be returned to the free lists "sweep _limit out of bounds");
// as one coalesced chunk. // Flush any remaining coterminal free run as a single
// coalesced chunk to the appropriate free list.
if (inFreeRange()) { if (inFreeRange()) {
flushCurFreeChunk(freeFinger(), assert(freeFinger() < _limit, "freeFinger points too high");
pointer_delta(_limit, freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
assert(freeFinger() < _limit, "the finger pointeth off base");
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print("destructor:"); gclog_or_tty->print("Sweep: last chunk: ");
gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") " gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
"[coalesced:"SIZE_FORMAT"]\n", freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
freeFinger(), pointer_delta(_limit, freeFinger()),
lastFreeRangeCoalesced());
} }
} } // else nothing to flush
NOT_PRODUCT( NOT_PRODUCT(
if (Verbose && PrintGC) { if (Verbose && PrintGC) {
gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
@ -7936,9 +7933,8 @@ SweepClosure::~SweepClosure() {
void SweepClosure::initialize_free_range(HeapWord* freeFinger, void SweepClosure::initialize_free_range(HeapWord* freeFinger,
bool freeRangeInFreeLists) { bool freeRangeInFreeLists) {
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n", gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
freeFinger, _sp->block_size(freeFinger), freeFinger, freeRangeInFreeLists);
freeRangeInFreeLists);
} }
assert(!inFreeRange(), "Trampling existing free range"); assert(!inFreeRange(), "Trampling existing free range");
set_inFreeRange(true); set_inFreeRange(true);
@ -7993,21 +7989,36 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// may have caused us to coalesce the block ending at the address _limit // may have caused us to coalesce the block ending at the address _limit
// with a newly expanded chunk (this happens when _limit was set to the // with a newly expanded chunk (this happens when _limit was set to the
// previous _end of the space), so we may have stepped past _limit; see CR 6977970. // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
if (addr >= _limit) { // we have swept up to or past the limit, do nothing more if (addr >= _limit) { // we have swept up to or past the limit: finish up
assert(_limit >= _sp->bottom() && _limit <= _sp->end(), assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds"); "sweep _limit out of bounds");
assert(addr < _sp->end(), "addr out of bounds"); assert(addr < _sp->end(), "addr out of bounds");
// help the closure application finish // Flush any remaining coterminal free run as a single
// coalesced chunk to the appropriate free list.
if (inFreeRange()) {
assert(freeFinger() < _limit, "finger points too high");
flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger()));
if (CMSTraceSweeper) {
gclog_or_tty->print("Sweep: last chunk: ");
gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
"[coalesced:"SIZE_FORMAT"]\n",
freeFinger(), pointer_delta(addr, freeFinger()),
lastFreeRangeCoalesced());
}
}
// help the iterator loop finish
return pointer_delta(_sp->end(), addr); return pointer_delta(_sp->end(), addr);
} }
assert(addr < _limit, "sweep invariant");
assert(addr < _limit, "sweep invariant");
// check if we should yield // check if we should yield
do_yield_check(addr); do_yield_check(addr);
if (fc->isFree()) { if (fc->isFree()) {
// Chunk that is already free // Chunk that is already free
res = fc->size(); res = fc->size();
doAlreadyFreeChunk(fc); do_already_free_chunk(fc);
debug_only(_sp->verifyFreeLists()); debug_only(_sp->verifyFreeLists());
assert(res == fc->size(), "Don't expect the size to change"); assert(res == fc->size(), "Don't expect the size to change");
NOT_PRODUCT( NOT_PRODUCT(
@ -8017,7 +8028,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
NOT_PRODUCT(_last_fc = fc;) NOT_PRODUCT(_last_fc = fc;)
} else if (!_bitMap->isMarked(addr)) { } else if (!_bitMap->isMarked(addr)) {
// Chunk is fresh garbage // Chunk is fresh garbage
res = doGarbageChunk(fc); res = do_garbage_chunk(fc);
debug_only(_sp->verifyFreeLists()); debug_only(_sp->verifyFreeLists());
NOT_PRODUCT( NOT_PRODUCT(
_numObjectsFreed++; _numObjectsFreed++;
@ -8025,7 +8036,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
) )
} else { } else {
// Chunk that is alive. // Chunk that is alive.
res = doLiveChunk(fc); res = do_live_chunk(fc);
debug_only(_sp->verifyFreeLists()); debug_only(_sp->verifyFreeLists());
NOT_PRODUCT( NOT_PRODUCT(
_numObjectsLive++; _numObjectsLive++;
@ -8078,7 +8089,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// to a free list which may be overpopulated. // to a free list which may be overpopulated.
// //
void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) { void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
size_t size = fc->size(); size_t size = fc->size();
// Chunks that cannot be coalesced are not in the // Chunks that cannot be coalesced are not in the
// free lists. // free lists.
@ -8094,23 +8105,23 @@ void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
// addr and purported end of this block. // addr and purported end of this block.
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
// Some chunks cannot be coalesced in under any circumstances. // Some chunks cannot be coalesced under any circumstances.
// See the definition of cantCoalesce(). // See the definition of cantCoalesce().
if (!fc->cantCoalesce()) { if (!fc->cantCoalesce()) {
// This chunk can potentially be coalesced. // This chunk can potentially be coalesced.
if (_sp->adaptive_freelists()) { if (_sp->adaptive_freelists()) {
// All the work is done in // All the work is done in
doPostIsFreeOrGarbageChunk(fc, size); do_post_free_or_garbage_chunk(fc, size);
} else { // Not adaptive free lists } else { // Not adaptive free lists
// this is a free chunk that can potentially be coalesced by the sweeper; // this is a free chunk that can potentially be coalesced by the sweeper;
if (!inFreeRange()) { if (!inFreeRange()) {
// if the next chunk is a free block that can't be coalesced // if the next chunk is a free block that can't be coalesced
// it doesn't make sense to remove this chunk from the free lists // it doesn't make sense to remove this chunk from the free lists
FreeChunk* nextChunk = (FreeChunk*)(addr + size); FreeChunk* nextChunk = (FreeChunk*)(addr + size);
assert((HeapWord*)nextChunk <= _limit, "sweep invariant"); assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
if ((HeapWord*)nextChunk < _limit && // there's a next chunk... if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
nextChunk->isFree() && // which is free... nextChunk->isFree() && // ... which is free...
nextChunk->cantCoalesce()) { // ... but cant be coalesced nextChunk->cantCoalesce()) { // ... but can't be coalesced
// nothing to do // nothing to do
} else { } else {
// Potentially the start of a new free range: // Potentially the start of a new free range:
@ -8156,14 +8167,14 @@ void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
// as the end of a free run if any // as the end of a free run if any
if (inFreeRange()) { if (inFreeRange()) {
// we kicked some butt; time to pick up the garbage // we kicked some butt; time to pick up the garbage
assert(freeFinger() < addr, "the finger pointeth off base"); assert(freeFinger() < addr, "freeFinger points too high");
flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
} }
// else, nothing to do, just continue // else, nothing to do, just continue
} }
} }
size_t SweepClosure::doGarbageChunk(FreeChunk* fc) { size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
// This is a chunk of garbage. It is not in any free list. // This is a chunk of garbage. It is not in any free list.
// Add it to a free list or let it possibly be coalesced into // Add it to a free list or let it possibly be coalesced into
// a larger chunk. // a larger chunk.
@ -8175,7 +8186,7 @@ size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
// addr and purported end of just dead object. // addr and purported end of just dead object.
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
doPostIsFreeOrGarbageChunk(fc, size); do_post_free_or_garbage_chunk(fc, size);
} else { } else {
if (!inFreeRange()) { if (!inFreeRange()) {
// start of a new free range // start of a new free range
@ -8214,35 +8225,16 @@ size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
return size; return size;
} }
size_t SweepClosure::doLiveChunk(FreeChunk* fc) { size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
HeapWord* addr = (HeapWord*) fc; HeapWord* addr = (HeapWord*) fc;
// The sweeper has just found a live object. Return any accumulated // The sweeper has just found a live object. Return any accumulated
// left hand chunk to the free lists. // left hand chunk to the free lists.
if (inFreeRange()) { if (inFreeRange()) {
if (_sp->adaptive_freelists()) { assert(freeFinger() < addr, "freeFinger points too high");
flushCurFreeChunk(freeFinger(), flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
pointer_delta(addr, freeFinger()));
} else { // not adaptive freelists
set_inFreeRange(false);
// Add the free range back to the free list if it is not already
// there.
if (!freeRangeInFreeLists()) {
assert(freeFinger() < addr, "the finger pointeth off base");
if (CMSTraceSweeper) {
gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
"[coalesced:%d]\n",
freeFinger(), pointer_delta(addr, freeFinger()),
lastFreeRangeCoalesced());
}
_sp->addChunkAndRepairOffsetTable(freeFinger(),
pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
}
}
} }
// Common code path for original and adaptive free lists. // This object is live: we'd normally expect this to be
// this object is live: we'd normally expect this to be
// an oop, and like to assert the following: // an oop, and like to assert the following:
// assert(oop(addr)->is_oop(), "live block should be an oop"); // assert(oop(addr)->is_oop(), "live block should be an oop");
// However, as we commented above, this may be an object whose // However, as we commented above, this may be an object whose
@ -8257,7 +8249,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
assert(size == CompactibleFreeListSpace::adjustObjectSize(size), assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
"alignment problem"); "alignment problem");
#ifdef DEBUG #ifdef DEBUG
if (oop(addr)->klass_or_null() != NULL && if (oop(addr)->klass_or_null() != NULL &&
( !_collector->should_unload_classes() ( !_collector->should_unload_classes()
|| (oop(addr)->is_parsable()) && || (oop(addr)->is_parsable()) &&
@ -8271,7 +8263,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
"P-mark and computed size do not agree"); "P-mark and computed size do not agree");
} }
#endif #endif
} else { } else {
// This should be an initialized object that's alive. // This should be an initialized object that's alive.
@ -8298,19 +8290,17 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
return size; return size;
} }
void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc, void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) { size_t chunkSize) {
// doPostIsFreeOrGarbageChunk() should only be called in the smart allocation // do_post_free_or_garbage_chunk() should only be called in the case
// scheme. // of the adaptive free list allocator.
bool fcInFreeLists = fc->isFree(); bool fcInFreeLists = fc->isFree();
assert(_sp->adaptive_freelists(), "Should only be used in this case."); assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant"); assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) { if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verifyChunkInFreeLists(fc), assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
"free chunk is not in free lists");
} }
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize); gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
} }
@ -8382,20 +8372,21 @@ void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
if (inFreeRange()) { if (inFreeRange()) {
// In a free range but cannot coalesce with the right hand chunk. // In a free range but cannot coalesce with the right hand chunk.
// Put the current free range into the free lists. // Put the current free range into the free lists.
flushCurFreeChunk(freeFinger(), flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger())); pointer_delta(addr, freeFinger()));
} }
// Set up for new free range. Pass along whether the right hand // Set up for new free range. Pass along whether the right hand
// chunk is in the free lists. // chunk is in the free lists.
initialize_free_range((HeapWord*)fc, fcInFreeLists); initialize_free_range((HeapWord*)fc, fcInFreeLists);
} }
} }
void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
assert(inFreeRange(), "Should only be called if currently in a free range."); assert(inFreeRange(), "Should only be called if currently in a free range.");
assert(size > 0, assert(size > 0,
"A zero sized chunk cannot be added to the free lists."); "A zero sized chunk cannot be added to the free lists.");
if (!freeRangeInFreeLists()) { if (!freeRangeInFreeLists()) {
if(CMSTestInFreeList) { if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk; FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size); fc->setSize(size);
assert(!_sp->verifyChunkInFreeLists(fc), assert(!_sp->verifyChunkInFreeLists(fc),
@ -8430,7 +8421,7 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
// chunk just flushed, they will need to wait for the next // chunk just flushed, they will need to wait for the next
// sweep to be coalesced. // sweep to be coalesced.
if (inFreeRange()) { if (inFreeRange()) {
flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
} }
// First give up the locks, then yield, then re-lock. // First give up the locks, then yield, then re-lock.
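The sweep changes above all revolve around one invariant: while sweeping, SweepClosure keeps an open "free range" that starts at freeFinger() and is flushed as a single coalesced chunk whenever the run is broken by a live object or the sweep reaches _limit. The following is a generic sketch of that coalescing idea over a list of blocks; Block, sweep() and the sizes are hypothetical, not the CMS types.

#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical block descriptor: 'live' blocks break a free run.
struct Block { std::size_t size; bool live; };

// Sweep a sequence of blocks, coalescing consecutive dead blocks into single
// runs, the way SweepClosure coalesces chunks between live objects.
std::vector<std::pair<std::size_t, std::size_t>> sweep(const std::vector<Block>& blocks) {
  std::vector<std::pair<std::size_t, std::size_t>> free_runs;  // (start, size)
  std::size_t addr = 0;
  bool in_free_range = false;
  std::size_t free_finger = 0;            // start of the current free run
  for (const Block& b : blocks) {
    if (b.live) {
      if (in_free_range) {                // a live object ends the run: flush it
        free_runs.emplace_back(free_finger, addr - free_finger);
        in_free_range = false;
      }
    } else if (!in_free_range) {          // start a new free run
      in_free_range = true;
      free_finger = addr;
    }
    addr += b.size;
  }
  if (in_free_range)                      // coterminal run flushed at the limit
    free_runs.emplace_back(free_finger, addr - free_finger);
  return free_runs;
}

int main() {
  std::vector<Block> heap{{16, false}, {8, false}, {32, true}, {8, false}, {24, false}};
  for (auto& run : sweep(heap))
    std::printf("free run at %zu, size %zu\n", run.first, run.second);
  return 0;
}

The destructor and the addr >= _limit branch in do_blk_careful() are the two "flush the coterminal run" cases that the final emplace_back above corresponds to.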

View File

@ -1701,7 +1701,9 @@ class SweepClosure: public BlkClosureCareful {
CMSCollector* _collector; // collector doing the work CMSCollector* _collector; // collector doing the work
ConcurrentMarkSweepGeneration* _g; // Generation being swept ConcurrentMarkSweepGeneration* _g; // Generation being swept
CompactibleFreeListSpace* _sp; // Space being swept CompactibleFreeListSpace* _sp; // Space being swept
HeapWord* _limit; HeapWord* _limit; // the address at which the sweep should stop because
// we do not expect blocks eligible for sweeping past
// that address.
Mutex* _freelistLock; // Free list lock (in space) Mutex* _freelistLock; // Free list lock (in space)
CMSBitMap* _bitMap; // Marking bit map (in CMSBitMap* _bitMap; // Marking bit map (in
// generation) // generation)
@ -1745,14 +1747,13 @@ class SweepClosure: public BlkClosureCareful {
private: private:
// Code that is common to a free chunk or garbage when // Code that is common to a free chunk or garbage when
// encountered during sweeping. // encountered during sweeping.
void doPostIsFreeOrGarbageChunk(FreeChunk *fc, void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
size_t chunkSize);
// Process a free chunk during sweeping. // Process a free chunk during sweeping.
void doAlreadyFreeChunk(FreeChunk *fc); void do_already_free_chunk(FreeChunk *fc);
// Process a garbage chunk during sweeping. // Process a garbage chunk during sweeping.
size_t doGarbageChunk(FreeChunk *fc); size_t do_garbage_chunk(FreeChunk *fc);
// Process a live chunk during sweeping. // Process a live chunk during sweeping.
size_t doLiveChunk(FreeChunk* fc); size_t do_live_chunk(FreeChunk* fc);
// Accessors. // Accessors.
HeapWord* freeFinger() const { return _freeFinger; } HeapWord* freeFinger() const { return _freeFinger; }
@ -1769,7 +1770,7 @@ class SweepClosure: public BlkClosureCareful {
// Initialize a free range. // Initialize a free range.
void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
// Return this chunk to the free lists. // Return this chunk to the free lists.
void flushCurFreeChunk(HeapWord* chunk, size_t size); void flush_cur_free_chunk(HeapWord* chunk, size_t size);
// Check if we should yield and do so when necessary. // Check if we should yield and do so when necessary.
inline void do_yield_check(HeapWord* addr); inline void do_yield_check(HeapWord* addr);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp" #include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp" #include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
@ -457,6 +458,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_marking_task_overhead(1.0), _marking_task_overhead(1.0),
_cleanup_sleep_factor(0.0), _cleanup_sleep_factor(0.0),
_cleanup_task_overhead(1.0), _cleanup_task_overhead(1.0),
_cleanup_list("Cleanup List"),
_region_bm(max_regions, false /* in_resource_area*/), _region_bm(max_regions, false /* in_resource_area*/),
_card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
CardTableModRefBS::card_shift, CardTableModRefBS::card_shift,
@ -520,12 +522,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
satb_qs.set_buffer_size(G1SATBBufferSize); satb_qs.set_buffer_size(G1SATBBufferSize);
int size = (int) MAX2(ParallelGCThreads, (size_t)1);
_par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
for (int i = 0 ; i < size; i++) {
_par_cleanup_thread_state[i] = new ParCleanupThreadState;
}
_tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
_accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
@ -710,11 +706,6 @@ void ConcurrentMark::set_non_marking_state() {
} }
ConcurrentMark::~ConcurrentMark() { ConcurrentMark::~ConcurrentMark() {
int size = (int) MAX2(ParallelGCThreads, (size_t)1);
for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
_par_cleanup_thread_state);
for (int i = 0; i < (int) _max_task_num; ++i) { for (int i = 0; i < (int) _max_task_num; ++i) {
delete _task_queues->queue(i); delete _task_queues->queue(i);
delete _tasks[i]; delete _tasks[i];
@ -1142,6 +1133,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
return; return;
} }
SvcGCMarker sgcm(SvcGCMarker::OTHER);
if (VerifyDuringGC) { if (VerifyDuringGC) {
HandleMark hm; // handle scope HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)"); gclog_or_tty->print(" VerifyDuringGC:(before)");
@ -1168,12 +1161,12 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (G1TraceMarkStackOverflow) if (G1TraceMarkStackOverflow)
gclog_or_tty->print_cr("\nRemark led to restart for overflow."); gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
} else { } else {
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
// We're done with marking. // We're done with marking.
// This is the end of the marking cycle; we expect all // This is the end of the marking cycle; we expect all
// threads to have SATB queues with active set to true. // threads to have SATB queues with active set to true.
JavaThread::satb_mark_queue_set().set_active_all_threads( satb_mq_set.set_active_all_threads(false, /* new active value */
false, /* new active value */ true /* expected_active */);
true /* expected_active */);
if (VerifyDuringGC) { if (VerifyDuringGC) {
HandleMark hm; // handle scope HandleMark hm; // handle scope
@ -1507,21 +1500,20 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
size_t _max_live_bytes; size_t _max_live_bytes;
size_t _regions_claimed; size_t _regions_claimed;
size_t _freed_bytes; size_t _freed_bytes;
size_t _cleared_h_regions; FreeRegionList _local_cleanup_list;
size_t _freed_regions; HumongousRegionSet _humongous_proxy_set;
UncleanRegionList* _unclean_region_list;
double _claimed_region_time; double _claimed_region_time;
double _max_region_time; double _max_region_time;
public: public:
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
UncleanRegionList* list,
int worker_num); int worker_num);
size_t freed_bytes() { return _freed_bytes; } size_t freed_bytes() { return _freed_bytes; }
size_t cleared_h_regions() { return _cleared_h_regions; } FreeRegionList* local_cleanup_list() {
size_t freed_regions() { return _freed_regions; } return &_local_cleanup_list;
UncleanRegionList* unclean_region_list() { }
return _unclean_region_list; HumongousRegionSet* humongous_proxy_set() {
return &_humongous_proxy_set;
} }
bool doHeapRegion(HeapRegion *r); bool doHeapRegion(HeapRegion *r);
@ -1534,25 +1526,22 @@ public:
class G1ParNoteEndTask: public AbstractGangTask { class G1ParNoteEndTask: public AbstractGangTask {
friend class G1NoteEndOfConcMarkClosure; friend class G1NoteEndOfConcMarkClosure;
protected: protected:
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
size_t _max_live_bytes; size_t _max_live_bytes;
size_t _freed_bytes; size_t _freed_bytes;
ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state; FreeRegionList* _cleanup_list;
public: public:
G1ParNoteEndTask(G1CollectedHeap* g1h, G1ParNoteEndTask(G1CollectedHeap* g1h,
ConcurrentMark::ParCleanupThreadState** FreeRegionList* cleanup_list) :
par_cleanup_thread_state) :
AbstractGangTask("G1 note end"), _g1h(g1h), AbstractGangTask("G1 note end"), _g1h(g1h),
_max_live_bytes(0), _freed_bytes(0), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
_par_cleanup_thread_state(par_cleanup_thread_state)
{}
void work(int i) { void work(int i) {
double start = os::elapsedTime(); double start = os::elapsedTime();
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i);
&_par_cleanup_thread_state[i]->list,
i);
if (G1CollectedHeap::use_parallel_gc_threads()) { if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, i, _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
HeapRegion::NoteEndClaimValue); HeapRegion::NoteEndClaimValue);
@ -1561,14 +1550,18 @@ public:
} }
assert(g1_note_end.complete(), "Shouldn't have yielded!"); assert(g1_note_end.complete(), "Shouldn't have yielded!");
// Now finish up freeing the current thread's regions. // Now update the lists
_g1h->finish_free_region_work(g1_note_end.freed_bytes(), _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
g1_note_end.cleared_h_regions(), NULL /* free_list */,
0, NULL); g1_note_end.humongous_proxy_set(),
true /* par */);
{ {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_max_live_bytes += g1_note_end.max_live_bytes(); _max_live_bytes += g1_note_end.max_live_bytes();
_freed_bytes += g1_note_end.freed_bytes(); _freed_bytes += g1_note_end.freed_bytes();
_cleanup_list->add_as_tail(g1_note_end.local_cleanup_list());
assert(g1_note_end.local_cleanup_list()->is_empty(), "post-condition");
} }
double end = os::elapsedTime(); double end = os::elapsedTime();
if (G1PrintParCleanupStats) { if (G1PrintParCleanupStats) {
@ -1609,30 +1602,28 @@ public:
G1NoteEndOfConcMarkClosure:: G1NoteEndOfConcMarkClosure::
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
UncleanRegionList* list,
int worker_num) int worker_num)
: _g1(g1), _worker_num(worker_num), : _g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0), _max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0), _cleared_h_regions(0), _freed_regions(0), _freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0), _claimed_region_time(0.0), _max_region_time(0.0),
_unclean_region_list(list) _local_cleanup_list("Local Cleanup List"),
{} _humongous_proxy_set("Local Cleanup Humongous Proxy Set") { }
bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) { bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
// We use a claim value of zero here because all regions // We use a claim value of zero here because all regions
// were claimed with value 1 in the FinalCount task. // were claimed with value 1 in the FinalCount task.
r->reset_gc_time_stamp(); hr->reset_gc_time_stamp();
if (!r->continuesHumongous()) { if (!hr->continuesHumongous()) {
double start = os::elapsedTime(); double start = os::elapsedTime();
_regions_claimed++; _regions_claimed++;
r->note_end_of_marking(); hr->note_end_of_marking();
_max_live_bytes += r->max_live_bytes(); _max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_totally_empty_work(r, _g1->free_region_if_totally_empty(hr,
_freed_bytes, &_freed_bytes,
_cleared_h_regions, &_local_cleanup_list,
_freed_regions, &_humongous_proxy_set,
_unclean_region_list, true /* par */);
true /*par*/);
double region_time = (os::elapsedTime() - start); double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time; _claimed_region_time += region_time;
if (region_time > _max_region_time) _max_region_time = region_time; if (region_time > _max_region_time) _max_region_time = region_time;
@ -1652,6 +1643,8 @@ void ConcurrentMark::cleanup() {
return; return;
} }
g1h->verify_region_sets_optional();
if (VerifyDuringGC) { if (VerifyDuringGC) {
HandleMark hm; // handle scope HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(before)"); gclog_or_tty->print(" VerifyDuringGC:(before)");
@ -1716,7 +1709,7 @@ void ConcurrentMark::cleanup() {
// Note end of marking in all heap regions. // Note end of marking in all heap regions.
double note_end_start = os::elapsedTime(); double note_end_start = os::elapsedTime();
G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state); G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
if (G1CollectedHeap::use_parallel_gc_threads()) { if (G1CollectedHeap::use_parallel_gc_threads()) {
int n_workers = g1h->workers()->total_workers(); int n_workers = g1h->workers()->total_workers();
g1h->set_par_threads(n_workers); g1h->set_par_threads(n_workers);
@ -1728,9 +1721,14 @@ void ConcurrentMark::cleanup() {
} else { } else {
g1_par_note_end_task.work(0); g1_par_note_end_task.work(0);
} }
g1h->set_unclean_regions_coming(true);
if (!cleanup_list_is_empty()) {
// The cleanup list is not empty, so we'll have to process it
// concurrently. Notify anyone else that might be wanting free
// regions that there will be more free regions coming soon.
g1h->set_free_regions_coming();
}
double note_end_end = os::elapsedTime(); double note_end_end = os::elapsedTime();
// Tell the mutators that there might be unclean regions coming...
if (G1PrintParCleanupStats) { if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
(note_end_end - note_end_start)*1000.0); (note_end_end - note_end_start)*1000.0);
@ -1796,33 +1794,63 @@ void ConcurrentMark::cleanup() {
/* silent */ false, /* silent */ false,
/* prev marking */ true); /* prev marking */ true);
} }
g1h->verify_region_sets_optional();
} }
void ConcurrentMark::completeCleanup() { void ConcurrentMark::completeCleanup() {
// A full collection intervened.
if (has_aborted()) return; if (has_aborted()) return;
int first = 0; G1CollectedHeap* g1h = G1CollectedHeap::heap();
int last = (int)MAX2(ParallelGCThreads, (size_t)1);
for (int t = 0; t < last; t++) { _cleanup_list.verify_optional();
UncleanRegionList* list = &_par_cleanup_thread_state[t]->list; FreeRegionList local_free_list("Local Cleanup List");
assert(list->well_formed(), "Inv");
HeapRegion* hd = list->hd(); if (G1ConcRegionFreeingVerbose) {
while (hd != NULL) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
// Now finish up the other stuff. "cleanup list has "SIZE_FORMAT" entries",
hd->rem_set()->clear(); _cleanup_list.length());
HeapRegion* next_hd = hd->next_from_unclean_list(); }
(void)list->pop();
assert(list->hd() == next_hd, "how not?"); // Noone else should be accessing the _cleanup_list at this point,
_g1h->put_region_on_unclean_list(hd); // so it's not necessary to take any locks
if (!hd->isHumongous()) { while (!_cleanup_list.is_empty()) {
// Add this to the _free_regions count by 1. HeapRegion* hr = _cleanup_list.remove_head();
_g1h->finish_free_region_work(0, 0, 1, NULL); assert(hr != NULL, "the list was not empty");
hr->rem_set()->clear();
local_free_list.add_as_tail(hr);
// Instead of adding one region at a time to the secondary_free_list,
// we accumulate them in the local list and move them a few at a
// time. This also cuts down on the number of notify_all() calls
// we do during this process. We'll also append the local list when
// _cleanup_list is empty (which means we just removed the last
// region from the _cleanup_list).
if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
_cleanup_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
"appending "SIZE_FORMAT" entries to the "
"secondary_free_list, clean list still has "
SIZE_FORMAT" entries",
local_free_list.length(),
_cleanup_list.length());
}
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
g1h->secondary_free_list_add_as_tail(&local_free_list);
SecondaryFreeList_lock->notify_all();
}
if (G1StressConcRegionFreeing) {
for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
os::sleep(Thread::current(), (jlong) 1, false);
}
} }
hd = list->hd();
assert(hd == next_hd, "how not?");
} }
} }
assert(local_free_list.is_empty(), "post-condition");
} }
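completeCleanup() drains _cleanup_list into a thread-local FreeRegionList and only appends to the shared secondary free list every G1SecondaryFreeListAppendLength regions (and once more when the cleanup list is exhausted), which bounds lock hold times and notify_all() traffic. The sketch below is a generic illustration of that batching idea with hypothetical names; it is not the G1 region-set code.

#include <condition_variable>
#include <cstddef>
#include <list>
#include <mutex>
#include <vector>

// Hypothetical shared free list guarded by a lock, standing in for
// G1's secondary_free_list plus SecondaryFreeList_lock.
struct SharedFreeList {
  std::mutex              lock;
  std::condition_variable cv;
  std::list<int>          regions;
};

// Move reclaimed regions to the shared list in batches of 'batch' elements
// (batch must be > 0), mirroring the append-every-N-regions loop above.
void drain_in_batches(std::vector<int>& reclaimed, SharedFreeList& shared,
                      std::size_t batch) {
  std::list<int> local;                       // thread-local accumulation
  while (!reclaimed.empty()) {
    local.push_back(reclaimed.back());
    reclaimed.pop_back();
    if (local.size() % batch == 0 || reclaimed.empty()) {
      std::lock_guard<std::mutex> g(shared.lock);
      shared.regions.splice(shared.regions.end(), local);  // O(1) transfer
      shared.cv.notify_all();                 // waiters see new free regions
    }
  }
}

int main() {
  SharedFreeList shared;
  std::vector<int> reclaimed{1, 2, 3, 4, 5, 6, 7};
  drain_in_batches(reclaimed, shared, 3);
  return shared.regions.size() == 7 ? 0 : 1;
}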
bool G1CMIsAliveClosure::do_object_b(oop obj) { bool G1CMIsAliveClosure::do_object_b(oop obj) {
@ -2894,9 +2922,9 @@ public:
virtual void do_oop( oop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) { template <class T> void do_oop_work(T* p) {
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), assert(!_g1h->is_on_free_list(
"invariant"); _g1h->heap_region_containing((HeapWord*) p)), "invariant");
oop obj = oopDesc::load_decode_heap_oop(p); oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high()) if (_cm->verbose_high())
@ -3116,8 +3144,8 @@ void CMTask::deal_with_reference(oop obj) {
void CMTask::push(oop obj) { void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj; HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(), assert(!_g1h->is_on_free_list(
"invariant"); _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
assert(!_g1h->is_obj_ill(obj), "invariant"); assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
@ -3362,8 +3390,8 @@ void CMTask::drain_local_queue(bool partially) {
(void*) obj); (void*) obj);
assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
assert(!_g1h->heap_region_containing(obj)->is_on_free_list(), assert(!_g1h->is_on_free_list(
"invariant"); _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
scan_object(obj); scan_object(obj);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp" #include "utilities/taskqueue.hpp"
class G1CollectedHeap; class G1CollectedHeap;
@ -369,13 +369,7 @@ protected:
double _cleanup_sleep_factor; double _cleanup_sleep_factor;
double _cleanup_task_overhead; double _cleanup_task_overhead;
// Stuff related to age cohort processing. FreeRegionList _cleanup_list;
struct ParCleanupThreadState {
char _pre[64];
UncleanRegionList list;
char _post[64];
};
ParCleanupThreadState** _par_cleanup_thread_state;
// CMS marking support structures // CMS marking support structures
CMBitMap _markBitMap1; CMBitMap _markBitMap1;
@ -484,6 +478,10 @@ protected:
// prints all gathered CM-related statistics // prints all gathered CM-related statistics
void print_stats(); void print_stats();
bool cleanup_list_is_empty() {
return _cleanup_list.is_empty();
}
// accessor methods // accessor methods
size_t parallel_marking_threads() { return _parallel_marking_threads; } size_t parallel_marking_threads() { return _parallel_marking_threads; }
double sleep_factor() { return _sleep_factor; } double sleep_factor() { return _sleep_factor; }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -95,8 +95,8 @@ void ConcurrentMarkThread::run() {
_vtime_start = os::elapsedVTime(); _vtime_start = os::elapsedVTime();
wait_for_universe_init(); wait_for_universe_init();
G1CollectedHeap* g1 = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1_policy = g1->g1_policy(); G1CollectorPolicy* g1_policy = g1h->g1_policy();
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker(); G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
Thread *current_thread = Thread::current(); Thread *current_thread = Thread::current();
@ -119,7 +119,7 @@ void ConcurrentMarkThread::run() {
if (!g1_policy->in_young_gc_mode()) { if (!g1_policy->in_young_gc_mode()) {
// this ensures the flag is not set if we bail out of the marking // this ensures the flag is not set if we bail out of the marking
// cycle; normally the flag is cleared immediately after cleanup // cycle; normally the flag is cleared immediately after cleanup
g1->set_marking_complete(); g1h->set_marking_complete();
if (g1_policy->adaptive_young_list_length()) { if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime(); double now = os::elapsedTime();
@ -228,10 +228,20 @@ void ConcurrentMarkThread::run() {
VM_CGC_Operation op(&cl_cl, verbose_str); VM_CGC_Operation op(&cl_cl, verbose_str);
VMThread::execute(&op); VMThread::execute(&op);
} else { } else {
G1CollectedHeap::heap()->set_marking_complete(); g1h->set_marking_complete();
} }
if (!cm()->has_aborted()) { // Check if cleanup set the free_regions_coming flag. If it
// hasn't, we can just skip the next step.
if (g1h->free_regions_coming()) {
// The following will finish freeing up any regions that we
// found to be empty during cleanup. We'll do this part
// without joining the suspendible set. If an evacuation pause
// takes place, then we would carry on freeing regions in
// case they are needed by the pause. If a Full GC takes
// place, it would wait for us to process the regions
// reclaimed by cleanup.
double cleanup_start_sec = os::elapsedTime(); double cleanup_start_sec = os::elapsedTime();
if (PrintGC) { if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGCDateStamps);
@ -240,23 +250,22 @@ void ConcurrentMarkThread::run() {
} }
// Now do the remainder of the cleanup operation. // Now do the remainder of the cleanup operation.
_sts.join();
_cm->completeCleanup(); _cm->completeCleanup();
if (!cm()->has_aborted()) { g1_policy->record_concurrent_mark_cleanup_completed();
g1_policy->record_concurrent_mark_cleanup_completed();
double cleanup_end_sec = os::elapsedTime(); double cleanup_end_sec = os::elapsedTime();
if (PrintGC) { if (PrintGC) {
gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps); gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]", gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
cleanup_end_sec - cleanup_start_sec); cleanup_end_sec - cleanup_start_sec);
}
} }
_sts.leave();
// We're done: no more free regions coming.
g1h->reset_free_regions_coming();
} }
// We're done: no more unclean regions coming. guarantee(cm()->cleanup_list_is_empty(),
G1CollectedHeap::heap()->set_unclean_regions_coming(false); "at this point there should be no regions on the cleanup list");
if (cm()->has_aborted()) { if (cm()->has_aborted()) {
if (PrintGC) { if (PrintGC) {
@ -278,7 +287,7 @@ void ConcurrentMarkThread::run() {
// Java thread is waiting for a full GC to happen (e.g., it // Java thread is waiting for a full GC to happen (e.g., it
// called System.gc() with +ExplicitGCInvokesConcurrent). // called System.gc() with +ExplicitGCInvokesConcurrent).
_sts.join(); _sts.join();
g1->increment_full_collections_completed(true /* concurrent */); g1h->increment_full_collections_completed(true /* concurrent */);
_sts.leave(); _sts.leave();
} }
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");
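The rewritten loop above replaces the old unclean_regions_coming machinery with a single free_regions_coming flag: cleanup raises it, the concurrent mark thread drains the cleanup list and then resets it, and code that must observe every freed region (a Full GC, for instance) waits while it is still raised. A bare-bones model of that handshake with a std::condition_variable standing in for HotSpot's Monitor (the class and method names below are illustrative, not the VM's):

#include <condition_variable>
#include <mutex>

class FreeRegionsComing {
  std::mutex              _m;
  std::condition_variable _cv;
  bool                    _coming = false;

 public:
  // Raised at the end of the cleanup pause: more free regions will appear.
  void set()   { std::lock_guard<std::mutex> g(_m); _coming = true; }

  // Called by the concurrent thread once the cleanup list has been drained.
  void reset() {
    { std::lock_guard<std::mutex> g(_m); _coming = false; }
    _cv.notify_all();
  }

  // Called by e.g. a full collection that must see every freed region.
  void wait_while_coming() {
    std::unique_lock<std::mutex> g(_m);
    _cv.wait(g, [this] { return !_coming; });
  }
};

wait_while_coming() corresponds to wait_while_free_regions_coming(); the guarantee above that the cleanup list is empty holds precisely because the flag is only reset after completeCleanup() has finished.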

View File

@ -1,194 +0,0 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentZFThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/copy.hpp"
// ======= Concurrent Zero-Fill Thread ========
// The ZF thread is created when the G1 garbage collector is used
int ConcurrentZFThread::_region_allocs = 0;
int ConcurrentZFThread::_sync_zfs = 0;
int ConcurrentZFThread::_zf_waits = 0;
int ConcurrentZFThread::_regions_filled = 0;
ConcurrentZFThread::ConcurrentZFThread() :
ConcurrentGCThread()
{
create_and_start();
}
void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) {
assert(ZF_mon->owned_by_self(), "Precondition.");
note_zf_wait();
while (hr->zero_fill_state() == HeapRegion::ZeroFilling) {
ZF_mon->wait(Mutex::_no_safepoint_check_flag);
}
}
void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) {
assert(!Universe::heap()->is_gc_active(),
"This should not happen during GC.");
assert(hr != NULL, "Precondition");
// These are unlocked reads, but if this test is successful, then no
// other thread will attempt this zero filling. Only a GC thread can
// modify the ZF state of a region whose state is zero-filling, and this
// should only happen while the ZF thread is locking out GC.
if (hr->zero_fill_state() == HeapRegion::ZeroFilling
&& hr->zero_filler() == Thread::current()) {
assert(hr->top() == hr->bottom(), "better be empty!");
assert(!hr->isHumongous(), "Only free regions on unclean list.");
Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize);
note_region_filled();
}
}
void ConcurrentZFThread::run() {
initialize_in_thread();
Thread* thr_self = Thread::current();
_vtime_start = os::elapsedVTime();
wait_for_universe_init();
G1CollectedHeap* g1 = G1CollectedHeap::heap();
_sts.join();
while (!_should_terminate) {
_sts.leave();
{
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
// This local variable will hold a region being zero-filled. This
// region will neither be on the unclean nor the zero-filled lists, and
// will not be available for allocation; thus, we might have an
// allocation fail, causing a full GC, because of this, but this is a
// price we will pay. (In future, we might want to make the fact
// that there's a region being zero-filled apparent to the G1 heap,
// which could then wait for it in this extreme case...)
HeapRegion* to_fill;
while (!g1->should_zf()
|| (to_fill = g1->pop_unclean_region_list_locked()) == NULL)
ZF_mon->wait(Mutex::_no_safepoint_check_flag);
while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling)
ZF_mon->wait(Mutex::_no_safepoint_check_flag);
// So now to_fill is non-NULL and is not ZeroFilling. It might be
// Allocated or ZeroFilled. (The latter could happen if this thread
// starts the zero-filling of a region, but a GC intervenes and
// pushes new regions needing zero-filling on the front of the
// list.)
switch (to_fill->zero_fill_state()) {
case HeapRegion::Allocated:
to_fill = NULL;
break;
case HeapRegion::NotZeroFilled:
to_fill->set_zero_fill_in_progress(thr_self);
ZF_mon->unlock();
_sts.join();
processHeapRegion(to_fill);
_sts.leave();
ZF_mon->lock_without_safepoint_check();
if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling
&& to_fill->zero_filler() == thr_self) {
to_fill->set_zero_fill_complete();
(void)g1->put_free_region_on_list_locked(to_fill);
}
break;
case HeapRegion::ZeroFilled:
(void)g1->put_free_region_on_list_locked(to_fill);
break;
case HeapRegion::ZeroFilling:
ShouldNotReachHere();
break;
}
}
_vtime_accum = (os::elapsedVTime() - _vtime_start);
_sts.join();
}
_sts.leave();
assert(_should_terminate, "just checking");
terminate();
}
bool ConcurrentZFThread::offer_yield() {
if (_sts.should_yield()) {
_sts.yield("Concurrent ZF");
return true;
} else {
return false;
}
}
void ConcurrentZFThread::stop() {
// it is ok to take late safepoints here, if needed
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
while (!_has_terminated) {
Terminator_lock->wait();
}
}
void ConcurrentZFThread::print() const {
print_on(tty);
}
void ConcurrentZFThread::print_on(outputStream* st) const {
st->print("\"G1 Concurrent Zero-Fill Thread\" ");
Thread::print_on(st);
st->cr();
}
double ConcurrentZFThread::_vtime_accum;
void ConcurrentZFThread::print_summary_info() {
gclog_or_tty->print("\nConcurrent Zero-Filling:\n");
gclog_or_tty->print(" Filled %d regions, used %5.2fs.\n",
_regions_filled,
vtime_accum());
gclog_or_tty->print(" Of %d region allocs, %d (%5.2f%%) required sync ZF,\n",
_region_allocs, _sync_zfs,
(_region_allocs > 0 ?
(float)_sync_zfs/(float)_region_allocs*100.0 :
0.0));
gclog_or_tty->print(" and %d (%5.2f%%) required a ZF wait.\n",
_zf_waits,
(_region_allocs > 0 ?
(float)_zf_waits/(float)_region_allocs*100.0 :
0.0));
}
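The file deleted above was, at its core, a wait-for-work loop: block on ZF_mon until an unclean region is available, zero the region's words outside the lock, then publish it on the free list. A compressed standalone model of that loop (a std::vector<uint64_t> stands in for a region's storage, and std::condition_variable for the monitor; none of these names are HotSpot's):

#include <algorithm>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <vector>

struct Region { std::vector<uint64_t> words; };

struct ZeroFillQueue {
  std::mutex              m;
  std::condition_variable cv;
  std::deque<Region*>     unclean;    // regions waiting to be zero-filled
  std::deque<Region*>     free_list;  // zero-filled, ready for allocation
  bool                    shutting_down = false;
};

// The body of the removed zero-filling thread, reduced to its essence:
// pop an unclean region, fill it with zeros outside the lock, re-publish it.
inline void zero_fill_loop(ZeroFillQueue& q) {
  for (;;) {
    Region* r = nullptr;
    {
      std::unique_lock<std::mutex> g(q.m);
      q.cv.wait(g, [&] { return q.shutting_down || !q.unclean.empty(); });
      if (q.shutting_down) return;
      r = q.unclean.front();
      q.unclean.pop_front();
    }
    std::fill(r->words.begin(), r->words.end(), 0);   // the actual zero-fill
    {
      std::lock_guard<std::mutex> g(q.m);
      q.free_list.push_back(r);
    }
    q.cv.notify_all();
  }
}

The surrounding change set retires this thread entirely; the remaining per-region clearing happens on the freeing path (free_region() "initializes the region's contents", per the header comments later in this diff) rather than in a dedicated background filler.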

View File

@ -1,91 +0,0 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
#include "gc_implementation/shared/concurrentGCThread.hpp"
// The Concurrent ZF Thread. Performs concurrent zero-filling.
class ConcurrentZFThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ZeroFillRegionClosure;
private:
// Zero fill the heap region.
void processHeapRegion(HeapRegion* r);
// Stats
// Allocation (protected by heap lock).
static int _region_allocs; // Number of regions allocated
static int _sync_zfs; // Synchronous zero-fills +
static int _zf_waits; // Wait for conc zero-fill completion.
// Number of regions the ZF thread fills.
static int _regions_filled;
double _vtime_start; // Initial virtual time.
// These are static because the "print_summary_info" method is, and
// it currently assumes there is only one ZF thread. We'll change when
// we need to.
static double _vtime_accum; // Accumulated virtual time.
static double vtime_accum() { return _vtime_accum; }
// Offer yield for GC. Returns true if yield occurred.
bool offer_yield();
public:
// Constructor
ConcurrentZFThread();
// Main loop.
virtual void run();
// Printing
void print_on(outputStream* st) const;
void print() const;
// Waits until "r" has been zero-filled. Requires caller to hold the
// ZF_mon.
static void wait_for_ZF_completed(HeapRegion* r);
// Get or clear the current unclean region. Should be done
// while holding the ZF_needed_mon lock.
// shutdown
void stop();
// Stats
static void note_region_alloc() {_region_allocs++; }
static void note_sync_zfs() { _sync_zfs++; }
static void note_zf_wait() { _zf_waits++; }
static void note_region_filled() { _regions_filled++; }
static void print_summary_info();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -222,7 +222,7 @@ void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
// Action_mark - update the BOT for the block [blk_start, blk_end). // Action_mark - update the BOT for the block [blk_start, blk_end).
// Current typical use is for splitting a block. // Current typical use is for splitting a block.
// Action_single - udpate the BOT for an allocation. // Action_single - update the BOT for an allocation.
// Action_verify - BOT verification. // Action_verify - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start, void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
HeapWord* blk_end, HeapWord* blk_end,
@ -331,47 +331,6 @@ G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
do_block_internal(blk_start, blk_end, Action_mark); do_block_internal(blk_start, blk_end, Action_mark);
} }
void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
HeapWord* blk1_start = Universe::heap()->block_start(blk1);
HeapWord* blk2_start = Universe::heap()->block_start(blk2);
assert(blk1 == blk1_start && blk2 == blk2_start,
"Must be block starts.");
assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
size_t blk1_start_index = _array->index_for(blk1);
size_t blk2_start_index = _array->index_for(blk2);
assert(blk1_start_index <= blk2_start_index, "sanity");
HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
if (blk2 == blk2_card_start) {
// blk2 starts a card. Does blk1 start on the previous card, or further
// back?
assert(blk1_start_index < blk2_start_index, "must be lower card.");
if (blk1_start_index + 1 == blk2_start_index) {
// previous card; new value for blk2 card is size of blk1.
_array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
} else {
// Earlier card; go back a card.
_array->set_offset_array(blk2_start_index, N_words);
}
} else {
// blk2 does not start a card. Does it cross a card? If not, nothing
// to do.
size_t blk2_end_index =
_array->index_for(blk2 + _sp->block_size(blk2) - 1);
assert(blk2_end_index >= blk2_start_index, "sanity");
if (blk2_end_index > blk2_start_index) {
// Yes, it crosses a card. The value for the next card must change.
if (blk1_start_index + 1 == blk2_start_index) {
// previous card; new value for second blk2 card is size of blk1.
_array->set_offset_array(blk2_start_index + 1,
(u_char) _sp->block_size(blk1));
} else {
// Earlier card; go back a card.
_array->set_offset_array(blk2_start_index + 1, N_words);
}
}
}
}
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) { HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end, assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array"); "addr must be covered by this Array");
@ -580,16 +539,51 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
#endif #endif
} }
void bool
G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) { G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
assert(_end == new_end, "_end should have already been updated"); size_t word_size) const {
size_t first_card = _array->index_for(obj_start);
// The first BOT entry should have offset 0. size_t last_card = _array->index_for(obj_start + word_size - 1);
_array->set_offset_array(_array->index_for(_bottom), 0); if (!_array->is_card_boundary(obj_start)) {
// The rest should point to the first one. // If the object is not on a card boundary the BOT entry of the
set_remainder_to_point_to_start(_bottom + N_words, new_end); // first card should point to another object so we should not
// check that one.
first_card += 1;
}
for (size_t card = first_card; card <= last_card; card += 1) {
HeapWord* card_addr = _array->address_for_index(card);
HeapWord* block_start = block_start_const(card_addr);
if (block_start != obj_start) {
gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
"card index: "SIZE_FORMAT" "
"card addr: "PTR_FORMAT" BOT entry: %u "
"obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
"cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
block_start, card, card_addr,
_array->offset_array(card),
obj_start, word_size, first_card, last_card);
return false;
}
}
return true;
} }
#ifndef PRODUCT
void
G1BlockOffsetArray::print_on(outputStream* out) {
size_t from_index = _array->index_for(_bottom);
size_t to_index = _array->index_for(_end);
out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
"cards ["SIZE_FORMAT","SIZE_FORMAT")",
_bottom, _end, from_index, to_index);
for (size_t i = from_index; i < to_index; ++i) {
out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
i, _array->address_for_index(i),
(uint) _array->offset_array(i));
}
}
#endif // !PRODUCT
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace // G1BlockOffsetArrayContigSpace
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
@ -641,10 +635,20 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
} }
void void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) { G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
G1BlockOffsetArray::set_for_starts_humongous(new_end); assert(new_top <= _end, "_end should have already been updated");
// Make sure _next_offset_threshold and _next_offset_index point to new_end. // The first BOT entry should have offset 0.
_next_offset_threshold = new_end; zero_bottom_entry();
_next_offset_index = _array->index_for(new_end); initialize_threshold();
alloc_block(_bottom, new_top);
}
#ifndef PRODUCT
void
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
G1BlockOffsetArray::print_on(out);
out->print_cr(" next offset threshold: "PTR_FORMAT, _next_offset_threshold);
out->print_cr(" next offset index: "SIZE_FORMAT, _next_offset_index);
} }
#endif // !PRODUCT
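Both verify_for_object() and the new print_on() above rely on the same card arithmetic: the covered space is split into fixed-size cards of N_words words, index_for() maps an address to its card, address_for_index() maps back, and each card's entry lets block_start_const() recover the start of the block covering that card. A toy table that keeps the same index arithmetic and the same verification rule (it stores a direct back-pointer per card instead of G1's encoded offsets, and N_words = 64 is used here only for concreteness; the real value depends on card and word size):

#include <cstddef>
#include <vector>

typedef unsigned long HeapWordIdx;           // word index into a pretend heap

struct ToyBOT {
  static const size_t N_words = 64;          // words per card (assumed)
  HeapWordIdx bottom;                        // first word covered
  std::vector<HeapWordIdx> block_start;      // per card: start of covering block

  ToyBOT(HeapWordIdx bot, size_t cards) : bottom(bot), block_start(cards, bot) {}

  size_t index_for(HeapWordIdx addr) const { return (addr - bottom) / N_words; }
  HeapWordIdx address_for_index(size_t i) const { return bottom + i * N_words; }

  // Record that an object occupies [obj_start, obj_start + word_size).
  void alloc_block(HeapWordIdx obj_start, size_t word_size) {
    size_t first = index_for(obj_start);
    size_t last  = index_for(obj_start + word_size - 1);
    for (size_t c = first; c <= last; ++c) block_start[c] = obj_start;
  }

  // The same check verify_for_object() performs: every card spanned by the
  // object must map back to the object's start, except a first card whose
  // boundary the object does not own.
  bool verify_for_object(HeapWordIdx obj_start, size_t word_size) const {
    size_t first = index_for(obj_start);
    size_t last  = index_for(obj_start + word_size - 1);
    if (address_for_index(first) != obj_start) first += 1;
    for (size_t c = first; c <= last; ++c) {
      if (block_start[c] != obj_start) return false;
    }
    return true;
  }
};

As in the real check, the first card is skipped when the object does not start on a card boundary, because that card's entry legitimately describes an earlier object.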

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -352,11 +352,6 @@ public:
// The following methods are useful and optimized for a // The following methods are useful and optimized for a
// general, non-contiguous space. // general, non-contiguous space.
// The given arguments are required to be the starts of adjacent ("blk1"
// before "blk2") well-formed blocks covered by "this". After this call,
// they should be considered to form one block.
virtual void join_blocks(HeapWord* blk1, HeapWord* blk2);
// Given a block [blk_start, blk_start + full_blk_size), and // Given a block [blk_start, blk_start + full_blk_size), and
// a left_blk_size < full_blk_size, adjust the BOT to show two // a left_blk_size < full_blk_size, adjust the BOT to show two
// blocks [blk_start, blk_start + left_blk_size) and // blocks [blk_start, blk_start + left_blk_size) and
@ -429,6 +424,12 @@ public:
verify_single_block(blk, blk + size); verify_single_block(blk, blk + size);
} }
// Used by region verification. Checks that the contents of the
// BOT reflect that there's a single object that spans the address
// range [obj_start, obj_start + word_size); returns true if this is
// the case, returns false if it's not.
bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
// Verify that the given block is before _unallocated_block // Verify that the given block is before _unallocated_block
inline void verify_not_unallocated(HeapWord* blk_start, inline void verify_not_unallocated(HeapWord* blk_start,
HeapWord* blk_end) const { HeapWord* blk_end) const {
@ -444,7 +445,7 @@ public:
void check_all_cards(size_t left_card, size_t right_card) const; void check_all_cards(size_t left_card, size_t right_card) const;
virtual void set_for_starts_humongous(HeapWord* new_end); virtual void print_on(outputStream* out) PRODUCT_RETURN;
}; };
// A subtype of BlockOffsetArray that takes advantage of the fact // A subtype of BlockOffsetArray that takes advantage of the fact
@ -494,7 +495,9 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
HeapWord* block_start_unsafe(const void* addr); HeapWord* block_start_unsafe(const void* addr);
HeapWord* block_start_unsafe_const(const void* addr) const; HeapWord* block_start_unsafe_const(const void* addr) const;
virtual void set_for_starts_humongous(HeapWord* new_end); void set_for_starts_humongous(HeapWord* new_top);
virtual void print_on(outputStream* out) PRODUCT_RETURN;
}; };
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp" #include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp" #include "memory/memRegion.hpp"
@ -66,8 +66,7 @@ typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum G1GCThreadGroups { enum G1GCThreadGroups {
G1CRGroup = 0, G1CRGroup = 0,
G1ZFGroup = 1, G1ZFGroup = 1,
G1CMGroup = 2, G1CMGroup = 2
G1CLGroup = 3
}; };
enum GCAllocPurpose { enum GCAllocPurpose {
@ -155,6 +154,7 @@ class G1CollectedHeap : public SharedHeap {
friend class RefineCardTableEntryClosure; friend class RefineCardTableEntryClosure;
friend class G1PrepareCompactClosure; friend class G1PrepareCompactClosure;
friend class RegionSorter; friend class RegionSorter;
friend class RegionResetter;
friend class CountRCClosure; friend class CountRCClosure;
friend class EvacPopObjClosure; friend class EvacPopObjClosure;
friend class G1ParCleanupCTTask; friend class G1ParCleanupCTTask;
@ -178,17 +178,20 @@ private:
// The maximum part of _g1_storage that has ever been committed. // The maximum part of _g1_storage that has ever been committed.
MemRegion _g1_max_committed; MemRegion _g1_max_committed;
// The number of regions that are completely free. // The master free list. It will satisfy all new region allocations.
size_t _free_regions; MasterFreeRegionList _free_list;
// The secondary free list which contains regions that have been
// freed up during the cleanup process. This will be appended to the
// master free list when appropriate.
SecondaryFreeRegionList _secondary_free_list;
// It keeps track of the humongous regions.
MasterHumongousRegionSet _humongous_set;
// The number of regions we could create by expansion. // The number of regions we could create by expansion.
size_t _expansion_regions; size_t _expansion_regions;
// Return the number of free regions in the heap (by direct counting.)
size_t count_free_regions();
// Return the number of free regions on the free and unclean lists.
size_t count_free_regions_list();
// The block offset table for the G1 heap. // The block offset table for the G1 heap.
G1BlockOffsetSharedArray* _bot_shared; G1BlockOffsetSharedArray* _bot_shared;
@ -196,9 +199,6 @@ private:
// lists, before and after full GC. // lists, before and after full GC.
void tear_down_region_lists(); void tear_down_region_lists();
void rebuild_region_lists(); void rebuild_region_lists();
// This sets all non-empty regions to need zero-fill (which they will if
// they are empty after full collection.)
void set_used_regions_to_need_zero_fill();
// The sequence of all heap regions in the heap. // The sequence of all heap regions in the heap.
HeapRegionSeq* _hrs; HeapRegionSeq* _hrs;
@ -231,7 +231,7 @@ private:
// Determines PLAB size for a particular allocation purpose. // Determines PLAB size for a particular allocation purpose.
static size_t desired_plab_sz(GCAllocPurpose purpose); static size_t desired_plab_sz(GCAllocPurpose purpose);
// When called by par thread, require par_alloc_during_gc_lock() to be held. // When called by par thread, requires the FreeList_lock to be held.
void push_gc_alloc_region(HeapRegion* hr); void push_gc_alloc_region(HeapRegion* hr);
// This should only be called single-threaded. Undeclares all GC alloc // This should only be called single-threaded. Undeclares all GC alloc
@ -294,10 +294,11 @@ private:
// line number, file, etc. // line number, file, etc.
#define heap_locking_asserts_err_msg(__extra_message) \ #define heap_locking_asserts_err_msg(__extra_message) \
err_msg("%s : Heap_lock %slocked, %sat a safepoint", \ err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
(__extra_message), \ (__extra_message), \
(!Heap_lock->owned_by_self()) ? "NOT " : "", \ BOOL_TO_STR(Heap_lock->owned_by_self()), \
(!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "") BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
BOOL_TO_STR(Thread::current()->is_VM_thread()))
#define assert_heap_locked() \ #define assert_heap_locked() \
do { \ do { \
@ -305,10 +306,11 @@ private:
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
} while (0) } while (0)
#define assert_heap_locked_or_at_safepoint() \ #define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \
do { \ do { \
assert(Heap_lock->owned_by_self() || \ assert(Heap_lock->owned_by_self() || \
SafepointSynchronize::is_at_safepoint(), \ (SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
"should be at a safepoint")); \ "should be at a safepoint")); \
} while (0) } while (0)
@ -335,9 +337,10 @@ private:
"should not be at a safepoint")); \ "should not be at a safepoint")); \
} while (0) } while (0)
#define assert_at_safepoint() \ #define assert_at_safepoint(__should_be_vm_thread) \
do { \ do { \
assert(SafepointSynchronize::is_at_safepoint(), \ assert(SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
heap_locking_asserts_err_msg("should be at a safepoint")); \ heap_locking_asserts_err_msg("should be at a safepoint")); \
} while (0) } while (0)
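The reworked macros above fold the interesting state (whether Heap_lock is held, whether the VM is at a safepoint, and whether the caller is the VM thread) into one formatted message, and the safepoint variants now also check the expected thread identity. A rough standalone approximation of the pattern, in which the three state queries are stubs rather than HotSpot APIs and the message text is abbreviated:

#include <cassert>
#include <cstdio>

// Stand-in state queries; in HotSpot these come from Heap_lock,
// SafepointSynchronize::is_at_safepoint() and Thread::current().
inline bool heap_lock_owned_by_self() { return true;  }
inline bool at_safepoint()            { return false; }
inline bool is_vm_thread()            { return false; }

#define BOOL_TO_STR(b) ((b) ? "true" : "false")

#define assert_heap_locked_or_at_safepoint(should_be_vm_thread)              \
  do {                                                                       \
    bool ok = heap_lock_owned_by_self() ||                                   \
              (at_safepoint() && ((should_be_vm_thread) == is_vm_thread())); \
    if (!ok) {                                                               \
      std::fprintf(stderr,                                                   \
          "should hold Heap_lock or be at a safepoint : "                    \
          "Heap_lock locked: %s, at safepoint: %s, is VM thread: %s\n",      \
          BOOL_TO_STR(heap_lock_owned_by_self()),                            \
          BOOL_TO_STR(at_safepoint()), BOOL_TO_STR(is_vm_thread()));         \
    }                                                                        \
    assert(ok);                                                              \
  } while (0)

Printing the three booleans up front makes an assertion failure in a log self-describing, which is the point of the BOOL_TO_STR() change above.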
@ -362,31 +365,41 @@ protected:
// The current policy object for the collector. // The current policy object for the collector.
G1CollectorPolicy* _g1_policy; G1CollectorPolicy* _g1_policy;
// Parallel allocation lock to protect the current allocation region. // This is the second level of trying to allocate a new region. If
Mutex _par_alloc_during_gc_lock; // new_region_work didn't find a region in the free_list, this call
Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } // will check whether there's anything available in the
// secondary_free_list and/or wait for more regions to appear in that
// list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list(size_t word_size);
// If possible/desirable, allocate a new HeapRegion for normal object // It will try to allocate a single non-humongous HeapRegion
// allocation sufficient for an allocation of the given "word_size". // sufficient for an allocation of the given word_size. If
// If "do_expand" is true, will attempt to expand the heap if necessary // do_expand is true, it will attempt to expand the heap if
// to to satisfy the request. If "zero_filled" is true, requires a // necessary to satisfy the allocation request. Note that word_size
// zero-filled region. // is only used to make sure that we expand sufficiently but, given
// (Returning NULL will trigger a GC.) // that the allocation request is assumed not to be humongous,
virtual HeapRegion* newAllocRegion_work(size_t word_size, // having word_size is not strictly necessary (expanding by a single
bool do_expand, // region will always be sufficient). But let's keep that parameter
bool zero_filled); // in case we need it in the future.
HeapRegion* new_region_work(size_t word_size, bool do_expand);
virtual HeapRegion* newAllocRegion(size_t word_size, // It will try to allocate a new region to be used for allocation by
bool zero_filled = true) { // mutator threads. It will not try to expand the heap if not region
return newAllocRegion_work(word_size, false, zero_filled); // is available.
HeapRegion* new_alloc_region(size_t word_size) {
return new_region_work(word_size, false /* do_expand */);
} }
virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
size_t word_size, // It will try to allocate a new region to be used for allocation by
bool zero_filled = true); // a GC thread. It will try to expand the heap if no region is
// available.
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
// Attempt to allocate an object of the given (very large) "word_size". // Attempt to allocate an object of the given (very large) "word_size".
// Returns "NULL" on failure. // Returns "NULL" on failure.
virtual HeapWord* humongous_obj_allocate(size_t word_size); HeapWord* humongous_obj_allocate(size_t word_size);
// The following two methods, allocate_new_tlab() and // The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime // mem_allocate(), are the two main entry points from the runtime
@ -430,7 +443,8 @@ protected:
bool* gc_overhead_limit_was_exceeded); bool* gc_overhead_limit_was_exceeded);
// The following methods, allocate_from_cur_allocation_region(), // The following methods, allocate_from_cur_allocation_region(),
// attempt_allocation(), replace_cur_alloc_region_and_allocate(), // attempt_allocation(), attempt_allocation_locked(),
// replace_cur_alloc_region_and_allocate(),
// attempt_allocation_slow(), and attempt_allocation_humongous() // attempt_allocation_slow(), and attempt_allocation_humongous()
// have very awkward pre- and post-conditions with respect to // have very awkward pre- and post-conditions with respect to
// locking: // locking:
@ -481,20 +495,30 @@ protected:
// successfully manage to allocate it, or NULL. // successfully manage to allocate it, or NULL.
// It tries to satisfy an allocation request out of the current // It tries to satisfy an allocation request out of the current
// allocating region, which is passed as a parameter. It assumes // alloc region, which is passed as a parameter. It assumes that the
// that the caller has checked that the current allocating region is // caller has checked that the current alloc region is not NULL.
// not NULL. Given that the caller has to check the current // Given that the caller has to check the current alloc region for
// allocating region for at least NULL, it might as well pass it as // at least NULL, it might as well pass it as the first parameter so
// the first parameter so that the method doesn't have to read it // that the method doesn't have to read it from the
// from the _cur_alloc_region field again. // _cur_alloc_region field again. It is called from both
// attempt_allocation() and attempt_allocation_locked() and the
// with_heap_lock parameter indicates whether the caller was holding
// the heap lock when it called it or not.
inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size); size_t word_size,
bool with_heap_lock);
// It attempts to allocate out of the current alloc region. If that // First-level of allocation slow path: it attempts to allocate out
// fails, it retires the current alloc region (if there is one), // of the current alloc region in a lock-free manner using a CAS. If
// tries to get a new one and retries the allocation. // that fails it takes the Heap_lock and calls
// attempt_allocation_locked() for the second-level slow path.
inline HeapWord* attempt_allocation(size_t word_size); inline HeapWord* attempt_allocation(size_t word_size);
// Second-level of allocation slow path: while holding the Heap_lock
// it tries to allocate out of the current alloc region and, if that
// fails, tries to allocate out of a new current alloc region.
inline HeapWord* attempt_allocation_locked(size_t word_size);
// It assumes that the current alloc region has been retired and // It assumes that the current alloc region has been retired and
// tries to allocate a new one. If it's successful, it performs the // tries to allocate a new one. If it's successful, it performs the
// allocation out of the new current alloc region and updates // allocation out of the new current alloc region and updates
@ -506,11 +530,11 @@ protected:
bool do_dirtying, bool do_dirtying,
bool can_expand); bool can_expand);
// The slow path when we are unable to allocate a new current alloc // Third-level of allocation slow path: when we are unable to
// region to satisfy an allocation request (i.e., when // allocate a new current alloc region to satisfy an allocation
// attempt_allocation() fails). It will try to do an evacuation // request (i.e., when attempt_allocation_locked() fails). It will
// pause, which might stall due to the GC locker, and retry the // try to do an evacuation pause, which might stall due to the GC
// allocation attempt when appropriate. // locker, and retry the allocation attempt when appropriate.
HeapWord* attempt_allocation_slow(size_t word_size); HeapWord* attempt_allocation_slow(size_t word_size);
// The method that tries to satisfy a humongous allocation // The method that tries to satisfy a humongous allocation
@ -749,20 +773,29 @@ protected:
// Invoke "save_marks" on all heap regions. // Invoke "save_marks" on all heap regions.
void save_marks(); void save_marks();
// Free a heap region. // It frees a non-humongous region by initializing its contents and
void free_region(HeapRegion* hr); // adding it to the free list that's passed as a parameter (this is
// A component of "free_region", exposed for 'batching'. // usually a local list which will be appended to the master free
// All the params after "hr" are out params: the used bytes of the freed // list later). The used bytes of freed regions are accumulated in
// region(s), the number of H regions cleared, the number of regions // pre_used. If par is true, the region's RSet will not be freed
// freed, and pointers to the head and tail of a list of freed contig // up. The assumption is that this will be done later.
// regions, linked through the "next_on_unclean_list" field. void free_region(HeapRegion* hr,
void free_region_work(HeapRegion* hr, size_t* pre_used,
size_t& pre_used, FreeRegionList* free_list,
size_t& cleared_h, bool par);
size_t& freed_regions,
UncleanRegionList* list,
bool par = false);
// It frees a humongous region by collapsing it into individual
// regions and calling free_region() for each of them. The freed
// regions will be added to the free list that's passed as a parameter
// (this is usually a local list which will be appended to the
// master free list later). The used bytes of freed regions are
// accumulated in pre_used. If par is true, the region's RSet will
// not be freed up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
HumongousRegionSet* humongous_proxy_set,
bool par);
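Per the comments above, free_region() and free_humongous_region() never touch the master lists directly: each freed region goes onto a caller-supplied local FreeRegionList, reclaimed bytes are summed into pre_used, and humongous regions are additionally remembered in a proxy set so the master humongous set can be fixed up later in one step. A stripped-down illustration of that shape (plain structs and std::vector stand in for HeapRegion and the region set types):

#include <cstddef>
#include <vector>

struct Region {
  size_t used_bytes;
  bool   humongous;
};

// Free one humongous object that spans regions[first .. first+count).
// Freed regions go onto 'local_free' and their used bytes into '*pre_used';
// they are also remembered in 'humongous_proxy' so the master humongous set
// can be fixed up later, in one pass, under the right lock.
inline void free_humongous_object(std::vector<Region*>& regions,
                                  size_t first, size_t count,
                                  size_t* pre_used,
                                  std::vector<Region*>& local_free,
                                  std::vector<Region*>& humongous_proxy) {
  for (size_t i = first; i < first + count; ++i) {
    Region* r = regions[i];
    *pre_used += r->used_bytes;
    r->used_bytes = 0;
    r->humongous  = false;
    humongous_proxy.push_back(r);  // removed from the master set later
    local_free.push_back(r);       // appended to the master free list later
  }
}

Deferring the master-list updates is what keeps the parallel paths cheap: workers only contend on a shared lock once, when their local results are merged.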
// The concurrent marker (and the thread it runs in.) // The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm; ConcurrentMark* _cm;
@ -772,9 +805,6 @@ protected:
// The concurrent refiner. // The concurrent refiner.
ConcurrentG1Refine* _cg1r; ConcurrentG1Refine* _cg1r;
// The concurrent zero-fill thread.
ConcurrentZFThread* _czft;
// The parallel task queues // The parallel task queues
RefToScanQueueSet *_task_queues; RefToScanQueueSet *_task_queues;
@ -826,7 +856,6 @@ protected:
void finalize_for_evac_failure(); void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps. // An attempt to evacuate "obj" has failed; take necessary steps.
void handle_evacuation_failure(oop obj);
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m); void handle_evacuation_failure_common(oop obj, markOop m);
@ -867,9 +896,7 @@ protected:
SubTasksDone* _process_strong_tasks; SubTasksDone* _process_strong_tasks;
// List of regions which require zero filling. volatile bool _free_regions_coming;
UncleanRegionList _unclean_region_list;
bool _unclean_regions_coming;
public: public:
@ -992,71 +1019,64 @@ public:
size_t max_regions(); size_t max_regions();
// The number of regions that are completely free. // The number of regions that are completely free.
size_t free_regions(); size_t free_regions() {
return _free_list.length();
}
// The number of regions that are not completely free. // The number of regions that are not completely free.
size_t used_regions() { return n_regions() - free_regions(); } size_t used_regions() { return n_regions() - free_regions(); }
// True iff the ZF thread should run.
bool should_zf();
// The number of regions available for "regular" expansion. // The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; } size_t expansion_regions() { return _expansion_regions; }
#ifndef PRODUCT // verify_region_sets() performs verification over the region
bool regions_accounted_for(); // lists. It will be compiled in the product code to be used when
bool print_region_accounting_info(); // necessary (i.e., during heap verification).
void print_region_counts(); void verify_region_sets();
#endif
HeapRegion* alloc_region_from_unclean_list(bool zero_filled); // verify_region_sets_optional() is planted in the code for
HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); // list verification in non-product builds (and it can be enabled in
// product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
void put_region_on_unclean_list(HeapRegion* r); #ifdef ASSERT
void put_region_on_unclean_list_locked(HeapRegion* r); bool is_on_free_list(HeapRegion* hr) {
return hr->containing_set() == &_free_list;
}
void prepend_region_list_on_unclean_list(UncleanRegionList* list); bool is_on_humongous_set(HeapRegion* hr) {
void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); return hr->containing_set() == &_humongous_set;
}
#endif // ASSERT
void set_unclean_regions_coming(bool b); // Wrapper for the region list operations that can be called from
void set_unclean_regions_coming_locked(bool b); // methods outside this class.
// Wait for cleanup to be complete.
void wait_for_cleanup_complete();
// Like above, but assumes that the calling thread owns the Heap_lock.
void wait_for_cleanup_complete_locked();
// Return the head of the unclean list. void secondary_free_list_add_as_tail(FreeRegionList* list) {
HeapRegion* peek_unclean_region_list_locked(); _secondary_free_list.add_as_tail(list);
// Remove and return the head of the unclean list. }
HeapRegion* pop_unclean_region_list_locked();
// List of regions which are zero filled and ready for allocation. void append_secondary_free_list() {
HeapRegion* _free_region_list; _free_list.add_as_tail(&_secondary_free_list);
// Number of elements on the free list. }
size_t _free_region_list_size;
// If the head of the unclean list is ZeroFilled, move it to the free void append_secondary_free_list_if_not_empty() {
// list. if (!_secondary_free_list.is_empty()) {
bool move_cleaned_region_to_free_list_locked(); MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
bool move_cleaned_region_to_free_list(); append_secondary_free_list();
}
}
void put_free_region_on_list_locked(HeapRegion* r); void set_free_regions_coming();
void put_free_region_on_list(HeapRegion* r); void reset_free_regions_coming();
bool free_regions_coming() { return _free_regions_coming; }
// Remove and return the head element of the free list. void wait_while_free_regions_coming();
HeapRegion* pop_free_region_list_locked();
// If "zero_filled" is true, we first try the free list, then we try the
// unclean list, zero-filling the result. If "zero_filled" is false, we
// first try the unclean list, then the zero-filled list.
HeapRegion* alloc_free_region_from_lists(bool zero_filled);
// Verify the integrity of the region lists.
void remove_allocated_regions_from_lists();
bool verify_region_lists();
bool verify_region_lists_locked();
size_t unclean_region_list_length();
size_t free_region_list_length();
// Perform a collection of the heap; intended for use in implementing // Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the // "System.gc". This probably implies as full a collection as the
@ -1075,23 +1095,24 @@ public:
// True iff a evacuation has failed in the most-recent collection. // True iff a evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; } bool evacuation_failed() { return _evacuation_failed; }
// Free a region if it is totally full of garbage. Returns the number of // It will free a region if it has allocated objects in it that are
// bytes freed (0 ==> didn't free it). // all dead. It calls either free_region() or
size_t free_region_if_totally_empty(HeapRegion *hr); // free_humongous_region() depending on the type of the region that
void free_region_if_totally_empty_work(HeapRegion *hr, // is passed to it.
size_t& pre_used, void free_region_if_totally_empty(HeapRegion* hr,
size_t& cleared_h_regions, size_t* pre_used,
size_t& freed_regions, FreeRegionList* free_list,
UncleanRegionList* list, HumongousRegionSet* humongous_proxy_set,
bool par = false); bool par);
// If we've done free region work that yields the given changes, update
// the relevant global variables.
void finish_free_region_work(size_t pre_used,
size_t cleared_h_regions,
size_t freed_regions,
UncleanRegionList* list);
// It appends the free list to the master free list and updates the
// master humongous list according to the contents of the proxy
// list. It also adjusts the total used bytes according to pre_used
// (if par is true, it will do so by taking the ParGCRareEvent_lock).
void update_sets_after_freeing_regions(size_t pre_used,
FreeRegionList* free_list,
HumongousRegionSet* humongous_proxy_set,
bool par);
// Returns "TRUE" iff "p" points into the allocated area of the heap. // Returns "TRUE" iff "p" points into the allocated area of the heap.
virtual bool is_in(const void* p) const; virtual bool is_in(const void* p) const;
@ -1304,8 +1325,6 @@ public:
return true; return true;
} }
virtual bool allocs_are_zero_filled();
// The boundary between a "large" and "small" array of primitives, in // The boundary between a "large" and "small" array of primitives, in
// words. // words.
virtual size_t large_typearray_limit(); virtual size_t large_typearray_limit();
@ -1536,13 +1555,6 @@ public:
protected: protected:
size_t _max_heap_capacity; size_t _max_heap_capacity;
public:
// Temporary: call to mark things unimplemented for the G1 heap (e.g.,
// MemoryService). In productization, we can make this assert false
// to catch such places (as well as searching for calls to this...)
static void g1_unimplemented();
}; };
#define use_local_bitmaps 1 #define use_local_bitmaps 1
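verify_region_sets_optional() above compiles to the full verify_region_sets() walk only when HEAP_REGION_SET_FORCE_VERIFY is non-zero (per the comment, that covers non-product builds and can be forced on in product builds too); otherwise it is an empty inline, so the call sites cost nothing. The same shape in miniature, with a hypothetical verify_all() as the expensive check and a differently named switch:

// FORCE_VERIFY_REGION_SETS-style switch: off by default here; a build can
// define it to 1 to compile the expensive check into every call site.
#ifndef FORCE_VERIFY_REGION_SETS
#define FORCE_VERIFY_REGION_SETS 0
#endif

struct RegionSets {
  // Hypothetical stand-in for G1CollectedHeap::verify_region_sets():
  // walk the free list and humongous set and check the region counts.
  void verify_all() { /* ... expensive consistency walk ... */ }

#if FORCE_VERIFY_REGION_SETS
  void verify_optional() { verify_all(); }
#else
  void verify_optional() { }   // compiles away; call sites stay in place
#endif
};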

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp" #include "utilities/taskqueue.hpp"
// Inline functions for G1CollectedHeap // Inline functions for G1CollectedHeap
@ -63,10 +63,12 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
// assumptions of this method (and other related ones). // assumptions of this method (and other related ones).
inline HeapWord* inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size) { size_t word_size,
assert_heap_locked_and_not_at_safepoint(); bool with_heap_lock) {
assert_not_at_safepoint();
assert(with_heap_lock == Heap_lock->owned_by_self(),
"with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
assert(cur_alloc_region != NULL, "pre-condition of the method"); assert(cur_alloc_region != NULL, "pre-condition of the method");
assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
assert(cur_alloc_region->is_young(), assert(cur_alloc_region->is_young(),
"we only support young current alloc regions"); "we only support young current alloc regions");
assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() " assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
@ -76,20 +78,24 @@ G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
assert(!cur_alloc_region->is_empty(), assert(!cur_alloc_region->is_empty(),
err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty", err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
cur_alloc_region->bottom(), cur_alloc_region->end())); cur_alloc_region->bottom(), cur_alloc_region->end()));
// This allocate method does BOT updates and we don't need them in HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
// the young generation. This will be fixed in the near future by
// CR 6994297.
HeapWord* result = cur_alloc_region->allocate(word_size);
if (result != NULL) { if (result != NULL) {
assert(is_in(result), "result should be in the heap"); assert(is_in(result), "result should be in the heap");
Heap_lock->unlock();
if (with_heap_lock) {
Heap_lock->unlock();
}
assert_heap_not_locked();
// Do the dirtying after we release the Heap_lock. // Do the dirtying after we release the Heap_lock.
dirty_young_block(result, word_size); dirty_young_block(result, word_size);
return result; return result;
} }
assert_heap_locked(); if (with_heap_lock) {
assert_heap_locked();
} else {
assert_heap_not_locked();
}
return NULL; return NULL;
} }
@ -97,26 +103,75 @@ G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
// assumptions of this method (and other related ones). // assumptions of this method (and other related ones).
inline HeapWord* inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) { G1CollectedHeap::attempt_allocation(size_t word_size) {
assert_heap_locked_and_not_at_safepoint(); assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not be called " assert(!isHumongous(word_size), "attempt_allocation() should not be called "
"for humongous allocation requests"); "for humongous allocation requests");
HeapRegion* cur_alloc_region = _cur_alloc_region; HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) { if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region, HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
word_size); word_size,
false /* with_heap_lock */);
assert_heap_not_locked();
if (result != NULL) {
return result;
}
}
// Our attempt to allocate lock-free failed as the current
// allocation region is either NULL or full. So, we'll now take the
// Heap_lock and retry.
Heap_lock->lock();
HeapWord* result = attempt_allocation_locked(word_size);
if (result != NULL) {
assert_heap_not_locked();
return result;
}
assert_heap_locked();
return NULL;
}
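The new attempt_allocation() above is deliberately two-tiered: first a lock-free attempt against the current alloc region (the comments earlier in this file describe it as allocating "in a lock-free manner using a CAS"), and only if that fails is Heap_lock taken so attempt_allocation_locked() can re-read the region, retire it and install a new one. A compact standalone model of the two tiers, with std::atomic and std::mutex in place of HotSpot's primitives and region replacement reduced to resetting a bump pointer:

#include <atomic>
#include <cstddef>
#include <mutex>

struct BumpRegion {
  std::atomic<size_t> top{0};   // next free word offset
  size_t              end = 0;  // set when the region is installed

  // Lock-free allocation attempt: CAS the top offset forward.
  size_t* par_allocate(size_t word_size, size_t* base) {
    size_t old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      if (old_top + word_size > end) return nullptr;   // region is full
      if (top.compare_exchange_weak(old_top, old_top + word_size))
        return base + old_top;
      // old_top was refreshed by the failed CAS; retry.
    }
  }
};

struct TwoLevelAllocator {
  std::mutex  heap_lock;
  BumpRegion  cur;
  size_t*     base = nullptr;   // must point at the region's storage

  // First level: lock-free. Second level: under the lock, retry and, if the
  // region is still full, "replace" it (here: just restart a fresh one).
  size_t* attempt_allocation(size_t word_size) {
    if (size_t* p = cur.par_allocate(word_size, base)) return p;

    std::lock_guard<std::mutex> g(heap_lock);
    if (size_t* p = cur.par_allocate(word_size, base)) return p;  // raced in
    cur.top.store(0);                    // stand-in for retiring + replacing
    return cur.par_allocate(word_size, base);
  }
};

Re-trying under the lock first (the "raced in" case) matters: another thread may have installed a fresh region while this one was waiting for the lock, exactly the situation the comment in attempt_allocation_locked() describes.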
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
"pre-condition of the call");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
// The region is guaranteed to be young
g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
_summary_bytes_used += cur_alloc_region->used();
_cur_alloc_region = NULL;
}
inline HeapWord*
G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation_locked() "
"should not be called for humongous allocation requests");
// First, reread the current alloc region and retry the allocation
// in case somebody replaced it while we were waiting to get the
// Heap_lock.
HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(
cur_alloc_region, word_size,
true /* with_heap_lock */);
if (result != NULL) { if (result != NULL) {
assert_heap_not_locked(); assert_heap_not_locked();
return result; return result;
} }
assert_heap_locked(); // We failed to allocate out of the current alloc region, so let's
// retire it before getting a new one.
// Since we couldn't successfully allocate into it, retire the
// current alloc region.
retire_cur_alloc_region(cur_alloc_region); retire_cur_alloc_region(cur_alloc_region);
} }
assert_heap_locked();
// Try to get a new region and allocate out of it // Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size, HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
false, /* at_safepoint */ false, /* at_safepoint */
@ -131,20 +186,6 @@ G1CollectedHeap::attempt_allocation(size_t word_size) {
return NULL; return NULL;
} }
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
assert_heap_locked_or_at_safepoint();
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
"pre-condition of the call");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
// The region is guaranteed to be young
g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
_summary_bytes_used += cur_alloc_region->used();
_cur_alloc_region = NULL;
}
// It dirties the cards that cover the block so that so that the post // It dirties the cards that cover the block so that so that the post
// write barrier never queues anything when updating objects on this // write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block // block. It is assumed (and in fact we assert) that the block

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2875,8 +2875,6 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
// Adjust for expansion and slop. // Adjust for expansion and slop.
max_live_bytes = max_live_bytes + expansion_bytes; max_live_bytes = max_live_bytes + expansion_bytes;
assert(_g1->regions_accounted_for(), "Region leakage!");
HeapRegion* hr; HeapRegion* hr;
if (in_young_gc_mode()) { if (in_young_gc_mode()) {
double young_start_time_sec = os::elapsedTime(); double young_start_time_sec = os::elapsedTime();

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -113,6 +113,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
Threads::gc_epilogue(); Threads::gc_epilogue();
CodeCache::gc_epilogue(); CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
// refs processing: clean slate // refs processing: clean slate
GenMarkSweep::_ref_processor = NULL; GenMarkSweep::_ref_processor = NULL;
@ -180,26 +181,46 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
} }
class G1PrepareCompactClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
size_t _pre_used;
FreeRegionList _free_list;
HumongousRegionSet _humongous_proxy_set;
void free_humongous_region(HeapRegion* hr) {
HeapWord* bot = hr->bottom();
HeapWord* end = hr->end();
assert(hr->startsHumongous(),
"Only the start of a humongous region should be freed.");
G1CollectedHeap::heap()->free_region(hr);
_g1h->free_humongous_region(hr, &_pre_used, &_free_list,
&_humongous_proxy_set, false /* par */);
// Do we also need to do this for the continues humongous regions
// we just collapsed?
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
_mrbs->clear(MemRegion(hr->compaction_top(), end));
}
public:
G1PrepareCompactClosure(CompactibleSpace* cs) :
_cp(NULL, cs, cs->initialize_threshold()),
_mrbs(G1CollectedHeap::heap()->mr_bs())
{}
G1PrepareCompactClosure(CompactibleSpace* cs)
: _g1h(G1CollectedHeap::heap()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_cp(NULL, cs, cs->initialize_threshold()),
_pre_used(0),
_free_list("Local Free List for G1MarkSweep"),
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
void update_sets() {
// We'll recalculate total used bytes and recreate the free list
// at the end of the GC, so no point in updating those values here.
_g1h->update_sets_after_freeing_regions(0, /* pre_used */
NULL, /* free_list */
&_humongous_proxy_set,
false /* par */);
_free_list.remove_all();
}
bool doHeapRegion(HeapRegion* hr) { bool doHeapRegion(HeapRegion* hr) {
if (hr->isHumongous()) { if (hr->isHumongous()) {
if (hr->startsHumongous()) { if (hr->startsHumongous()) {
@ -265,6 +286,7 @@ void G1MarkSweep::mark_sweep_phase2() {
G1PrepareCompactClosure blk(sp); G1PrepareCompactClosure blk(sp);
g1h->heap_region_iterate(&blk); g1h->heap_region_iterate(&blk);
blk.update_sets();
CompactPoint perm_cp(pg, NULL, NULL); CompactPoint perm_cp(pg, NULL, NULL);
pg->prepare_for_compaction(&perm_cp); pg->prepare_for_compaction(&perm_cp);
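The new G1PrepareCompactClosure above accumulates freed humongous regions into closure-local containers (_pre_used, _free_list, _humongous_proxy_set) and folds them into the global heap state with a single update_sets() call at the end of the iteration. Below is a self-contained sketch of that accumulate-then-merge pattern; ToyHeap, ToyRegion and PrepareCompactVisitor are made-up names, not G1's types.

  #include <cstddef>
  #include <vector>

  struct ToyRegion { size_t used_bytes; };

  struct ToyHeap {
    size_t total_used = 0;
    std::vector<ToyRegion*> free_list;
    void update_sets_after_freeing(size_t freed_bytes,
                                   std::vector<ToyRegion*>& local_free_list) {
      total_used -= freed_bytes;                       // one global update...
      free_list.insert(free_list.end(),
                       local_free_list.begin(), local_free_list.end());
      local_free_list.clear();                         // ...then drop the proxy
    }
  };

  struct PrepareCompactVisitor {
    ToyHeap* heap;
    size_t pre_used = 0;                               // bytes freed so far
    std::vector<ToyRegion*> local_free_list;           // regions freed so far

    void free_region(ToyRegion* r) {                   // no global update here
      pre_used += r->used_bytes;
      local_free_list.push_back(r);
    }
    void update_sets() {                               // called once at the end
      heap->update_sets_after_freeing(pre_used, local_free_list);
    }
  };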

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,21 +75,12 @@
"(0 means do not periodically generate this info); " \ "(0 means do not periodically generate this info); " \
"it also requires -XX:+G1SummarizeRSetStats") \ "it also requires -XX:+G1SummarizeRSetStats") \
\ \
diagnostic(bool, G1SummarizeZFStats, false, \
"Summarize zero-filling info") \
\
diagnostic(bool, G1TraceConcRefinement, false, \ diagnostic(bool, G1TraceConcRefinement, false, \
"Trace G1 concurrent refinement") \ "Trace G1 concurrent refinement") \
\ \
product(intx, G1MarkRegionStackSize, 1024 * 1024, \ product(intx, G1MarkRegionStackSize, 1024 * 1024, \
"Size of the region stack for concurrent marking.") \ "Size of the region stack for concurrent marking.") \
\ \
develop(bool, G1ConcZeroFill, true, \
"If true, run concurrent zero-filling thread") \
\
develop(intx, G1ConcZFMaxRegions, 1, \
"Stop zero-filling when # of zf'd regions reaches") \
\
develop(bool, G1SATBBarrierPrintNullPreVals, false, \ develop(bool, G1SATBBarrierPrintNullPreVals, false, \
"If true, count frac of ptr writes with null pre-vals.") \ "If true, count frac of ptr writes with null pre-vals.") \
\ \
@ -99,6 +90,13 @@
develop(intx, G1SATBProcessCompletedThreshold, 20, \ develop(intx, G1SATBProcessCompletedThreshold, 20, \
"Number of completed buffers that triggers log processing.") \ "Number of completed buffers that triggers log processing.") \
\ \
product(uintx, G1SATBBufferEnqueueingThresholdPercent, 60, \
"Before enqueueing them, each mutator thread tries to do some " \
"filtering on the SATB buffers it generates. If post-filtering " \
"the percentage of retained entries is over this threshold " \
"the buffer will be enqueued for processing. A value of 0 " \
"specifies that mutator threads should not do such filtering.") \
\
develop(intx, G1ExtraRegionSurvRate, 33, \ develop(intx, G1ExtraRegionSurvRate, 33, \
"If the young survival rate is S, and there's room left in " \ "If the young survival rate is S, and there's room left in " \
"to-space, we will allow regions whose survival rate is up to " \ "to-space, we will allow regions whose survival rate is up to " \
@ -282,7 +280,20 @@
"Size of a work unit of cards claimed by a worker thread" \ "Size of a work unit of cards claimed by a worker thread" \
"during RSet scanning.") \ "during RSet scanning.") \
\ \
develop(bool, ReduceInitialCardMarksForG1, false, \
develop(uintx, G1SecondaryFreeListAppendLength, 5, \
"The number of regions we will add to the secondary free list " \
"at every append operation") \
\
develop(bool, G1ConcRegionFreeingVerbose, false, \
"Enables verboseness during concurrent region freeing") \
\
develop(bool, G1StressConcRegionFreeing, false, \
"It stresses the concurrent region freeing operation") \
\
develop(uintx, G1StressConcRegionFreeingDelayMillis, 0, \
"Artificial delay during concurrent region freeing") \
\
develop(bool, ReduceInitialCardMarksForG1, false, \
"When ReduceInitialCardMarks is true, this flag setting " \ "When ReduceInitialCardMarks is true, this flag setting " \
" controls whether G1 allows the RICM optimization") " controls whether G1 allows the RICM optimization")

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,6 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_implementation/g1/concurrentZFThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@ -348,22 +347,20 @@ HeapRegion::new_dcto_closure(OopClosure* cl,
} }
void HeapRegion::hr_clear(bool par, bool clear_space) {
_humongous_type = NotHumongous;
_humongous_start_region = NULL;
assert(_humongous_type == NotHumongous,
"we should have already filtered out humongous regions");
assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions");
assert(_end == _orig_end,
"we should have already filtered out humongous regions");
_in_collection_set = false;
_is_gc_alloc_region = false;
// Age stuff (if parallel, this will be done separately, since it needs
// to be sequential).
G1CollectedHeap* g1h = G1CollectedHeap::heap();
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_young_type(NotYoung);
// In case it had been the start of a humongous sequence, reset its end.
set_end(_orig_end);
if (!par) {
// If this is parallel, this will be done later.
HeapRegionRemSet* hrrs = rem_set();
@ -386,26 +383,49 @@ void HeapRegion::calc_gc_efficiency() {
} }
// </PREDICTION> // </PREDICTION>
void HeapRegion::set_startsHumongous(HeapWord* new_end) {
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
assert(!isHumongous(), "sanity / pre-condition");
assert(end() == _orig_end,
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
_humongous_type = StartsHumongous;
_humongous_start_region = this;
set_end(new_end);
_offsets.set_for_starts_humongous(new_end);
_offsets.set_for_starts_humongous(new_top);
}
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
assert(!isHumongous(), "sanity / pre-condition");
assert(end() == _orig_end,
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(start->startsHumongous(), "pre-condition");
assert(first_hr->startsHumongous(), "pre-condition");
_humongous_type = ContinuesHumongous;
_humongous_start_region = start;
_humongous_start_region = first_hr;
}
void HeapRegion::set_notHumongous() {
assert(isHumongous(), "pre-condition");
if (startsHumongous()) {
assert(top() <= end(), "pre-condition");
set_end(_orig_end);
if (top() > end()) {
// at least one "continues humongous" region after it
set_top(end());
}
} else {
// continues humongous
assert(end() == _orig_end, "sanity");
}
assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
_humongous_type = NotHumongous;
_humongous_start_region = NULL;
} }
bool HeapRegion::claimHeapRegion(jint claimValue) { bool HeapRegion::claimHeapRegion(jint claimValue) {
@ -442,15 +462,6 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
return low; return low;
} }
void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
_next_in_special_set = r;
}
void HeapRegion::set_on_unclean_list(bool b) {
_is_on_unclean_list = b;
}
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
G1OffsetTableContigSpace::initialize(mr, false, mangle_space); G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
hr_clear(false/*par*/, clear_space); hr_clear(false/*par*/, clear_space);
@ -468,15 +479,16 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
_hrs_index(-1),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false), _is_gc_alloc_region(false),
_is_on_free_list(false), _is_on_unclean_list(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL),
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _zfs(NotZeroFilled),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
_containing_set(NULL),
#endif // ASSERT
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_predicted_bytes_to_copy(0)
{ {
_orig_end = mr.end(); _orig_end = mr.end();
@ -551,86 +563,6 @@ void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
} }
#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
jint state = zero_fill_state();
assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
zero_fill_is_allocated(),
"When ZF is on, only alloc in ZF'd regions");
return G1OffsetTableContigSpace::allocate(size);
}
#endif
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
assert(ZF_mon->owned_by_self() ||
Universe::heap()->is_gc_active(),
"Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
if (top() != bottom() && zfs != Allocated) {
ResourceMark rm;
stringStream region_str;
print_on(&region_str);
assert(top() == bottom() || zfs == Allocated,
err_msg("Region must be empty, or we must be setting it to allocated. "
"_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
}
#endif
_zfs = zfs;
}
void HeapRegion::set_zero_fill_complete() {
set_zero_fill_state_work(ZeroFilled);
if (ZF_mon->owned_by_self()) {
ZF_mon->notify_all();
}
}
void HeapRegion::ensure_zero_filled() {
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
ensure_zero_filled_locked();
}
void HeapRegion::ensure_zero_filled_locked() {
assert(ZF_mon->owned_by_self(), "Precondition");
bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
assert(should_ignore_zf || Heap_lock->is_locked(),
"Either we're in a GC or we're allocating a region.");
switch (zero_fill_state()) {
case HeapRegion::NotZeroFilled:
set_zero_fill_in_progress(Thread::current());
{
ZF_mon->unlock();
Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
ZF_mon->lock_without_safepoint_check();
}
// A trap.
guarantee(zero_fill_state() == HeapRegion::ZeroFilling
&& zero_filler() == Thread::current(),
"AHA! Tell Dave D if you see this...");
set_zero_fill_complete();
// gclog_or_tty->print_cr("Did sync ZF.");
ConcurrentZFThread::note_sync_zfs();
break;
case HeapRegion::ZeroFilling:
if (should_ignore_zf) {
// We can "break" the lock and take over the work.
Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
set_zero_fill_complete();
ConcurrentZFThread::note_sync_zfs();
break;
} else {
ConcurrentZFThread::wait_for_ZF_completed(this);
}
case HeapRegion::ZeroFilled:
// Nothing to do.
break;
case HeapRegion::Allocated:
guarantee(false, "Should not call on allocated regions.");
}
assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
HeapWord* HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr, HeapRegion::object_iterate_mem_careful(MemRegion mr,
ObjectClosure* cl) { ObjectClosure* cl) {
@ -782,9 +714,6 @@ void HeapRegion::verify(bool allow_dirty) const {
verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy); verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
} }
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100
// This really ought to be commoned up into OffsetTableContigSpace somehow. // This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects. // We would need a mechanism to make that code skip dead objects.
@ -795,83 +724,125 @@ void HeapRegion::verify(bool allow_dirty,
*failures = false; *failures = false;
HeapWord* p = bottom(); HeapWord* p = bottom();
HeapWord* prev_p = NULL; HeapWord* prev_p = NULL;
int objs = 0;
int blocks = 0;
VerifyLiveClosure vl_cl(g1, use_prev_marking); VerifyLiveClosure vl_cl(g1, use_prev_marking);
bool is_humongous = isHumongous(); bool is_humongous = isHumongous();
bool do_bot_verify = !is_young();
size_t object_num = 0; size_t object_num = 0;
while (p < top()) { while (p < top()) {
size_t size = oop(p)->size(); oop obj = oop(p);
if (is_humongous != g1->isHumongous(size)) { size_t obj_size = obj->size();
object_num += 1;
if (is_humongous != g1->isHumongous(obj_size)) {
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size (" gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
SIZE_FORMAT" words) in a %shumongous region", SIZE_FORMAT" words) in a %shumongous region",
p, g1->isHumongous(size) ? "" : "non-", p, g1->isHumongous(obj_size) ? "" : "non-",
size, is_humongous ? "" : "non-"); obj_size, is_humongous ? "" : "non-");
*failures = true; *failures = true;
return;
} }
object_num += 1;
if (blocks == BLOCK_SAMPLE_INTERVAL) { // If it returns false, verify_for_object() will output the
HeapWord* res = block_start_const(p + (size/2)); // appropriate messasge.
if (p != res) { if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and " *failures = true;
SIZE_FORMAT" returned "PTR_FORMAT, return;
p, size, res);
*failures = true;
return;
}
blocks = 0;
} else {
blocks++;
} }
if (objs == OBJ_SAMPLE_INTERVAL) {
oop obj = oop(p); if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) { if (obj->is_oop()) {
if (obj->is_oop()) { klassOop klass = obj->klass();
klassOop klass = obj->klass(); if (!klass->is_perm()) {
if (!klass->is_perm()) { gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " "not in perm", klass, obj);
"not in perm", klass, obj);
*failures = true;
return;
} else if (!klass->is_klass()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not a klass", klass, obj);
*failures = true;
return;
} else {
vl_cl.set_containing_obj(obj);
obj->oop_iterate(&vl_cl);
if (vl_cl.failures()) {
*failures = true;
}
if (G1MaxVerifyFailures >= 0 &&
vl_cl.n_failures() >= G1MaxVerifyFailures) {
return;
}
}
} else {
gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
*failures = true; *failures = true;
return; return;
} else if (!klass->is_klass()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not a klass", klass, obj);
*failures = true;
return;
} else {
vl_cl.set_containing_obj(obj);
obj->oop_iterate(&vl_cl);
if (vl_cl.failures()) {
*failures = true;
}
if (G1MaxVerifyFailures >= 0 &&
vl_cl.n_failures() >= G1MaxVerifyFailures) {
return;
}
} }
} } else {
objs = 0; gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
} else {
objs++;
}
prev_p = p;
p += size;
}
HeapWord* rend = end();
HeapWord* rtop = top();
if (rtop < rend) {
HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
if (res != rtop) {
gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
PTR_FORMAT" returned "PTR_FORMAT,
rtop, rend, res);
*failures = true; *failures = true;
return; return;
}
}
prev_p = p;
p += obj_size;
}
if (p != top()) {
gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
"does not match top "PTR_FORMAT, p, top());
*failures = true;
return;
}
HeapWord* the_end = end();
assert(p == top(), "it should still hold");
// Do some extra BOT consistency checking for addresses in the
// range [top, end). BOT look-ups in this range should yield
// top. No point in doing that if top == end (there's nothing there).
if (p < the_end) {
// Look up top
HeapWord* addr_1 = p;
HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
if (b_start_1 != p) {
gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_1, b_start_1, p);
*failures = true;
return;
}
// Look up top + 1
HeapWord* addr_2 = p + 1;
if (addr_2 < the_end) {
HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
if (b_start_2 != p) {
gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_2, b_start_2, p);
*failures = true;
return;
}
}
// Look up an address between top and end
size_t diff = pointer_delta(the_end, p) / 2;
HeapWord* addr_3 = p + diff;
if (addr_3 < the_end) {
HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
if (b_start_3 != p) {
gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_3, b_start_3, p);
*failures = true;
return;
}
}
// Look up end - 1
HeapWord* addr_4 = the_end - 1;
HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
if (b_start_4 != p) {
gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_4, b_start_4, p);
*failures = true;
return;
} }
} }
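The checks added above rest on one BOT property: a block-start look-up for any address in [top, end) must come back with top itself, because nothing is allocated past top. The following is a toy, self-contained model of such a look-up plus the same spot checks (top, top + 1, a midpoint, end - 1); ToyRegion and its fields are invented stand-ins, not G1's block offset table.

  #include <algorithm>
  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct ToyRegion {
    std::vector<size_t> object_starts;   // sorted start offsets of live objects
    size_t top;                          // first unallocated word
    size_t end;                          // end of the region

    size_t block_start(size_t addr) const {
      if (object_starts.empty() || addr >= top) return top;
      auto it = std::upper_bound(object_starts.begin(), object_starts.end(), addr);
      return *(--it);                    // last object starting at or before addr
    }
  };

  void verify_unallocated_tail(const ToyRegion& r) {
    if (r.top >= r.end) return;          // nothing to check when the region is full
    for (size_t addr : { r.top, r.top + 1,
                         r.top + (r.end - r.top) / 2, r.end - 1 }) {
      if (addr < r.end) {
        assert(r.block_start(addr) == r.top);
      }
    }
  }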
@ -880,12 +851,6 @@ void HeapRegion::verify(bool allow_dirty,
"but has "SIZE_FORMAT", objects", "but has "SIZE_FORMAT", objects",
bottom(), end(), object_num); bottom(), end(), object_num);
*failures = true; *failures = true;
}
if (p != top()) {
gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
"does not match top "PTR_FORMAT, p, top());
*failures = true;
return; return;
} }
} }
@ -976,67 +941,3 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
_offsets.set_space(this); _offsets.set_space(this);
initialize(mr, !is_zeroed, SpaceDecorator::Mangle); initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
} }
size_t RegionList::length() {
size_t len = 0;
HeapRegion* cur = hd();
DEBUG_ONLY(HeapRegion* last = NULL);
while (cur != NULL) {
len++;
DEBUG_ONLY(last = cur);
cur = get_next(cur);
}
assert(last == tl(), "Invariant");
return len;
}
void RegionList::insert_before_head(HeapRegion* r) {
assert(well_formed(), "Inv");
set_next(r, hd());
_hd = r;
_sz++;
if (tl() == NULL) _tl = r;
assert(well_formed(), "Inv");
}
void RegionList::prepend_list(RegionList* new_list) {
assert(well_formed(), "Precondition");
assert(new_list->well_formed(), "Precondition");
HeapRegion* new_tl = new_list->tl();
if (new_tl != NULL) {
set_next(new_tl, hd());
_hd = new_list->hd();
_sz += new_list->sz();
if (tl() == NULL) _tl = new_list->tl();
} else {
assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
}
assert(well_formed(), "Inv");
}
void RegionList::delete_after(HeapRegion* r) {
assert(well_formed(), "Precondition");
HeapRegion* next = get_next(r);
assert(r != NULL, "Precondition");
HeapRegion* next_tl = get_next(next);
set_next(r, next_tl);
dec_sz();
if (next == tl()) {
assert(next_tl == NULL, "Inv");
_tl = r;
}
assert(well_formed(), "Inv");
}
HeapRegion* RegionList::pop() {
assert(well_formed(), "Inv");
HeapRegion* res = hd();
if (res != NULL) {
_hd = get_next(res);
_sz--;
set_next(res, NULL);
if (sz() == 0) _tl = NULL;
}
assert(well_formed(), "Inv");
return res;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,6 +50,11 @@ class ContiguousSpace;
class HeapRegionRemSet; class HeapRegionRemSet;
class HeapRegionRemSetIterator; class HeapRegionRemSetIterator;
class HeapRegion; class HeapRegion;
class HeapRegionSetBase;
#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
(__hr)->top(), (__hr)->end()
// A dirty card to oop closure for heap regions. It // A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap // knows how to get the G1 heap and how to use the bitmap
@ -173,6 +178,19 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
virtual void print() const; virtual void print() const;
void reset_bot() {
_offsets.zero_bottom_entry();
_offsets.initialize_threshold();
}
void update_bot_for_object(HeapWord* start, size_t word_size) {
_offsets.alloc_block(start, word_size);
}
void print_bot_on(outputStream* out) {
_offsets.print_on(out);
}
}; };
class HeapRegion: public G1OffsetTableContigSpace { class HeapRegion: public G1OffsetTableContigSpace {
@ -214,12 +232,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// True iff the region is in current collection_set. // True iff the region is in current collection_set.
bool _in_collection_set; bool _in_collection_set;
// True iff the region is on the unclean list, waiting to be zero filled.
bool _is_on_unclean_list;
// True iff the region is on the free list, ready for allocation.
bool _is_on_free_list;
// Is this or has it been an allocation region in the current collection // Is this or has it been an allocation region in the current collection
// pause. // pause.
bool _is_gc_alloc_region; bool _is_gc_alloc_region;
@ -241,6 +253,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Next region whose cards need cleaning // Next region whose cards need cleaning
HeapRegion* _next_dirty_cards_region; HeapRegion* _next_dirty_cards_region;
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
#endif // ASSERT
bool _pending_removal;
// For parallel heapRegion traversal. // For parallel heapRegion traversal.
jint _claimed; jint _claimed;
@ -292,10 +311,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
_top_at_conc_mark_count = bot; _top_at_conc_mark_count = bot;
} }
jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
// made it so.
void set_young_type(YoungType new_type) { void set_young_type(YoungType new_type) {
//assert(_young_type != new_type, "setting the same type" ); //assert(_young_type != new_type, "setting the same type" );
// TODO: add more assertions here // TODO: add more assertions here
@ -349,15 +364,14 @@ class HeapRegion: public G1OffsetTableContigSpace {
RebuildRSClaimValue = 5 RebuildRSClaimValue = 5
}; };
// Concurrent refinement requires contiguous heap regions (in which TLABs inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
// might be allocated) to be zero-filled. Each region therefore has a assert(is_young(), "we can only skip BOT updates on young regions");
// zero-fill-state. return ContiguousSpace::par_allocate(word_size);
enum ZeroFillState { }
NotZeroFilled, inline HeapWord* allocate_no_bot_updates(size_t word_size) {
ZeroFilling, assert(is_young(), "we can only skip BOT updates on young regions");
ZeroFilled, return ContiguousSpace::allocate(word_size);
Allocated }
};
// If this region is a member of a HeapRegionSeq, the index in that // If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1. // sequence, otherwise -1.
@ -404,13 +418,38 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _humongous_start_region; return _humongous_start_region;
} }
// Causes the current region to represent a humongous object spanning "n"
// regions.
void set_startsHumongous(HeapWord* new_end);
// Makes the current region be a "starts humongous" region, i.e.,
// the first region in a series of one or more contiguous regions
// that will contain a single "humongous" object. The two parameters
// are as follows:
//
// new_top : The new value of the top field of this region which
// points to the end of the humongous object that's being
// allocated. If there is more than one region in the series, top
// will lie beyond this region's original end field and on the last
// region in the series.
//
// new_end : The new value of the end field of this region which
// points to the end of the last region in the series. If there is
// one region in the series (namely: this one) end will be the same
// as the original end of this region.
//
// Updating top and end as described above makes this region look as
// if it spans the entire space taken up by all the regions in the
// series and a single allocation moved its top to new_top. This
// ensures that the space (capacity / allocated) taken up by all
// humongous regions can be calculated by just looking at the
// "starts humongous" regions and by ignoring the "continues
// humongous" regions.
void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
// The regions that continue a humongous sequence should be added using
// this method, in increasing address order.
void set_continuesHumongous(HeapRegion* start);
// Makes the current region be a "continues humongous"
// region. first_hr is the "start humongous" region of the series
// which this region will be part of.
void set_continuesHumongous(HeapRegion* first_hr);
// Unsets the humongous-related fields on the region.
void set_notHumongous();
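The long comment above explains why top and end of a "starts humongous" region are stretched over the whole series: space accounting can then visit only the first region of each series and skip the "continues humongous" ones. A small standalone sketch of that accounting idea, with an invented ToyRegion type rather than HotSpot's HeapRegion:

  #include <cstddef>
  #include <vector>

  enum class Kind { Normal, StartsHumongous, ContinuesHumongous };

  struct ToyRegion {
    Kind kind;
    size_t bottom, top, end;                     // word offsets in a toy heap
    size_t capacity() const { return end - bottom; }
    size_t used()     const { return top - bottom; }
  };

  size_t humongous_used(const std::vector<ToyRegion>& regions) {
    size_t sum = 0;
    for (const ToyRegion& r : regions) {
      if (r.kind == Kind::StartsHumongous) {
        sum += r.used();                         // top/end span the entire series
      }
      // Kind::ContinuesHumongous regions are deliberately ignored.
    }
    return sum;
  }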
// If the region has a remembered set, return a pointer to it. // If the region has a remembered set, return a pointer to it.
HeapRegionRemSet* rem_set() const { HeapRegionRemSet* rem_set() const {
@ -458,45 +497,56 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r; _next_in_special_set = r;
} }
bool is_on_free_list() { // Methods used by the HeapRegionSetBase class and subclasses.
return _is_on_free_list;
// Getter and setter for the next field used to link regions into
// linked lists.
HeapRegion* next() { return _next; }
void set_next(HeapRegion* next) { _next = next; }
// Every region added to a set is tagged with a reference to that
// set. This is used for doing consistency checking to make sure that
// the contents of a set are as they should be and it's only
// available in non-product builds.
#ifdef ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) {
assert((containing_set == NULL && _containing_set != NULL) ||
(containing_set != NULL && _containing_set == NULL),
err_msg("containing_set: "PTR_FORMAT" "
"_containing_set: "PTR_FORMAT,
containing_set, _containing_set));
_containing_set = containing_set;
}
HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) { }
// containing_set() is only used in asserts so there's no reason
// to provide a dummy version of it.
#endif // ASSERT
// If we want to remove regions from a list in bulk we can simply tag
// them with the pending_removal tag and call the
// remove_all_pending() method on the list.
bool pending_removal() { return _pending_removal; }
void set_pending_removal(bool pending_removal) {
// We can only set pending_removal to true, if it's false and the
// region belongs to a set.
assert(!pending_removal ||
(!_pending_removal && containing_set() != NULL), "pre-condition");
// We can only set pending_removal to false, if it's true and the
// region does not belong to a set.
assert( pending_removal ||
( _pending_removal && containing_set() == NULL), "pre-condition");
_pending_removal = pending_removal;
} }
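The containing-set tagging and the pending_removal protocol described above can be illustrated with a toy set type: each element records which set owns it (checked on every transition), and bulk removal first tags elements and then sweeps the list once. This is a simplified stand-in with invented names (ToySet, ToyRegion), not the HeapRegionSetBase code.

  #include <cassert>
  #include <vector>

  struct ToySet;                                   // forward declaration

  struct ToyRegion {
    ToySet* containing_set = nullptr;
    bool pending_removal = false;
    void set_containing_set(ToySet* s) {
      // Only NULL -> non-NULL or non-NULL -> NULL transitions are legal.
      assert((s == nullptr) != (containing_set == nullptr));
      containing_set = s;
    }
  };

  struct ToySet {
    std::vector<ToyRegion*> regions;
    void add(ToyRegion* r) {
      r->set_containing_set(this);
      regions.push_back(r);
    }
    void remove_all_pending() {
      std::vector<ToyRegion*> kept;
      for (ToyRegion* r : regions) {
        if (r->pending_removal) {
          r->pending_removal = false;
          r->set_containing_set(nullptr);          // it no longer belongs here
        } else {
          kept.push_back(r);
        }
      }
      regions.swap(kept);
    }
  };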
void set_on_free_list(bool b) {
_is_on_free_list = b;
}
HeapRegion* next_from_free_list() {
assert(is_on_free_list(),
"Should only invoke on free space.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->is_on_free_list(),
"Malformed Free List.");
return _next_in_special_set;
}
void set_next_on_free_list(HeapRegion* r) {
assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
_next_in_special_set = r;
}
bool is_on_unclean_list() {
return _is_on_unclean_list;
}
void set_on_unclean_list(bool b);
HeapRegion* next_from_unclean_list() {
assert(is_on_unclean_list(),
"Should only invoke on unclean space.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->is_on_unclean_list(),
"Malformed unclean List.");
return _next_in_special_set;
}
void set_next_on_unclean_list(HeapRegion* r);
HeapRegion* get_next_young_region() { return _next_young_region; } HeapRegion* get_next_young_region() { return _next_young_region; }
void set_next_young_region(HeapRegion* hr) { void set_next_young_region(HeapRegion* hr) {
_next_young_region = hr; _next_young_region = hr;
@ -515,11 +565,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
void initialize(MemRegion mr, bool clear_space, bool mangle_space); void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Ensure that "this" is zero-filled.
void ensure_zero_filled();
// This one requires that the calling thread holds ZF_mon.
void ensure_zero_filled_locked();
// Get the start of the unmarked area in this region. // Get the start of the unmarked area in this region.
HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; } HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; } HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
@ -754,36 +799,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// "end" of the region if there is no such block. // "end" of the region if there is no such block.
HeapWord* next_block_start_careful(HeapWord* addr); HeapWord* next_block_start_careful(HeapWord* addr);
// Returns the zero-fill-state of the current region.
ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
bool zero_fill_is_allocated() { return _zfs == Allocated; }
Thread* zero_filler() { return _zero_filler; }
// Indicate that the contents of the region are unknown, and therefore
// might require zero-filling.
void set_zero_fill_needed() {
set_zero_fill_state_work(NotZeroFilled);
}
void set_zero_fill_in_progress(Thread* t) {
set_zero_fill_state_work(ZeroFilling);
_zero_filler = t;
}
void set_zero_fill_complete();
void set_zero_fill_allocated() {
set_zero_fill_state_work(Allocated);
}
void set_zero_fill_state_work(ZeroFillState zfs);
// This is called when a full collection shrinks the heap.
// We want to set the heap region to a value which says
// it is no longer part of the heap. For now, we'll let "NotZF" fill
// that role.
void reset_zero_fill() {
set_zero_fill_state_work(NotZeroFilled);
_zero_filler = NULL;
}
size_t recorded_rs_length() const { return _recorded_rs_length; } size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; } double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; } size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
@ -822,10 +837,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Override; it uses the "prev" marking information // Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty) const; virtual void verify(bool allow_dirty) const;
#ifdef DEBUG
HeapWord* allocate(size_t size);
#endif
}; };
// HeapRegionClosure is used for iterating over regions. // HeapRegionClosure is used for iterating over regions.
@ -848,113 +859,6 @@ class HeapRegionClosure : public StackObj {
bool complete() { return _complete; } bool complete() { return _complete; }
}; };
// A linked lists of heap regions. It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
protected:
virtual HeapRegion* get_next(HeapRegion* chr) = 0;
virtual void set_next(HeapRegion* chr,
HeapRegion* new_next) = 0;
HeapRegion* _hd;
HeapRegion* _tl;
size_t _sz;
// Protected constructor because this type is only meaningful
// when the _get/_set next functions are defined.
RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
public:
void reset() {
_hd = NULL;
_tl = NULL;
_sz = 0;
}
HeapRegion* hd() { return _hd; }
HeapRegion* tl() { return _tl; }
size_t sz() { return _sz; }
size_t length();
bool well_formed() {
return
((hd() == NULL && tl() == NULL && sz() == 0)
|| (hd() != NULL && tl() != NULL && sz() > 0))
&& (sz() == length());
}
virtual void insert_before_head(HeapRegion* r);
void prepend_list(RegionList* new_list);
virtual HeapRegion* pop();
void dec_sz() { _sz--; }
// Requires that "r" is an element of the list, and is not the tail.
void delete_after(HeapRegion* r);
};
class EmptyNonHRegionList: public RegionList {
protected:
// Protected constructor because this type is only meaningful
// when the _get/_set next functions are defined.
EmptyNonHRegionList() : RegionList() {}
public:
void insert_before_head(HeapRegion* r) {
// assert(r->is_empty(), "Better be empty");
assert(!r->isHumongous(), "Better not be humongous.");
RegionList::insert_before_head(r);
}
void prepend_list(EmptyNonHRegionList* new_list) {
// assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
// "Better be empty");
assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
"Better not be humongous.");
// assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
// "Better be empty");
assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
"Better not be humongous.");
RegionList::prepend_list(new_list);
}
};
class UncleanRegionList: public EmptyNonHRegionList {
public:
HeapRegion* get_next(HeapRegion* hr) {
return hr->next_from_unclean_list();
}
void set_next(HeapRegion* hr, HeapRegion* new_next) {
hr->set_next_on_unclean_list(new_next);
}
UncleanRegionList() : EmptyNonHRegionList() {}
void insert_before_head(HeapRegion* r) {
assert(!r->is_on_free_list(),
"Better not already be on free list");
assert(!r->is_on_unclean_list(),
"Better not already be on unclean list");
r->set_zero_fill_needed();
r->set_on_unclean_list(true);
EmptyNonHRegionList::insert_before_head(r);
}
void prepend_list(UncleanRegionList* new_list) {
assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
"Better not already be on free list");
assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
"Better already be marked as on unclean list");
assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
"Better not already be on free list");
assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
"Better already be marked as on unclean list");
EmptyNonHRegionList::prepend_list(new_list);
}
HeapRegion* pop() {
HeapRegion* res = RegionList::pop();
if (res != NULL) res->set_on_unclean_list(false);
return res;
}
};
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
#endif // SERIALGC #endif // SERIALGC
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,152 +65,6 @@ HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
// Private methods. // Private methods.
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
assert(G1CollectedHeap::isHumongous(word_size),
"Allocation size should be humongous");
int cur = ind;
int first = cur;
size_t sumSizes = 0;
while (cur < _regions.length() && sumSizes < word_size) {
// Loop invariant:
// For all i in [first, cur):
// _regions.at(i)->is_empty()
// && _regions.at(i) is contiguous with its predecessor, if any
// && sumSizes is the sum of the sizes of the regions in the interval
// [first, cur)
HeapRegion* curhr = _regions.at(cur);
if (curhr->is_empty()
&& (first == cur
|| (_regions.at(cur-1)->end() ==
curhr->bottom()))) {
sumSizes += curhr->capacity() / HeapWordSize;
} else {
first = cur + 1;
sumSizes = 0;
}
cur++;
}
if (sumSizes >= word_size) {
_alloc_search_start = cur;
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// Currently, allocs_are_zero_filled() returns false. The zero
// filling infrastructure will be going away soon (see CR 6977804).
// So no need to do anything else here.
bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
assert(!zf, "not supported");
// This will be the "starts humongous" region.
HeapRegion* first_hr = _regions.at(first);
{
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
first_hr->set_zero_fill_allocated();
}
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
// (Note: sumSizes = "region size" x "number of regions we found").
HeapWord* new_end = new_obj + sumSizes;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord* new_top = new_obj + word_size;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_startsHumongous(new_end);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (int i = first + 1; i < cur; ++i) {
hr = _regions.at(i);
{
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
hr->set_zero_fill_allocated();
}
hr->set_continuesHumongous(first_hr);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert(hr == NULL || hr->end() == new_end, "sanity");
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess::storestore();
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
"new_top should be in this region");
first_hr->set_top(new_top);
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here.
hr = NULL;
for (int i = first + 1; i < cur; ++i) {
hr = _regions.at(i);
if ((i + 1) == cur) {
// last continues humongous region
assert(hr->bottom() < new_top && new_top <= hr->end(),
"new_top should fall on this region");
hr->set_top(new_top);
} else {
// not last one
assert(new_top > hr->end(), "new_top should be above this region");
hr->set_top(hr->end());
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert(hr == NULL ||
(hr->end() == new_end && hr->top() == new_top), "sanity");
return new_obj;
} else {
// If we started from the beginning, we want to know why we can't alloc.
return NULL;
}
}
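The removed allocator above leans on a store-ordering argument: zero the object header and initialize the BOT first, then publish the new top after a store-store barrier, so a concurrent refinement thread can never observe the advanced top without also observing the zeroed header. A minimal sketch of that publish-after-initialize idiom using standard C++ atomics (a release store standing in for OrderAccess::storestore(); ToyRegion and its fields are invented):

  #include <atomic>
  #include <cstddef>

  struct ToyRegion {
    size_t header = 0xdeadbeef;            // garbage until the allocator clears it
    std::atomic<size_t> top{0};            // published allocation frontier

    void publish_object(size_t new_top) {
      header = 0;                                       // 1. zero the header first
      top.store(new_top, std::memory_order_release);    // 2. then publish top
    }

    bool reader_sees_consistent_state() const {
      size_t t = top.load(std::memory_order_acquire);
      return t == 0 || header == 0;        // a new top implies a zeroed header
    }
  };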
void HeapRegionSeq::print_empty_runs() { void HeapRegionSeq::print_empty_runs() {
int empty_run = 0; int empty_run = 0;
int n_empty = 0; int n_empty = 0;
@ -284,13 +138,67 @@ size_t HeapRegionSeq::free_suffix() {
return res; return res;
} }
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
int cur = _alloc_search_start;
// Make sure "cur" is a valid index.
assert(cur >= 0, "Invariant.");
HeapWord* res = alloc_obj_from_region_index(cur, word_size);
if (res == NULL)
res = alloc_obj_from_region_index(0, word_size);
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
assert(num > 1, "pre-condition");
assert(0 <= from && from <= _regions.length(),
err_msg("from: %d should be valid and <= than %d",
from, _regions.length()));
int curr = from;
int first = -1;
size_t num_so_far = 0;
while (curr < _regions.length() && num_so_far < num) {
HeapRegion* curr_hr = _regions.at(curr);
if (curr_hr->is_empty()) {
if (first == -1) {
first = curr;
num_so_far = 1;
} else {
num_so_far += 1;
}
} else {
first = -1;
num_so_far = 0;
}
curr += 1;
}
assert(num_so_far <= num, "post-condition");
if (num_so_far == num) {
// we find enough space for the humongous object
assert(from <= first && first < _regions.length(), "post-condition");
assert(first < curr && (curr - first) == (int) num, "post-condition");
for (int i = first; i < first + (int) num; ++i) {
assert(_regions.at(i)->is_empty(), "post-condition");
}
return first;
} else {
// we failed to find enough space for the humongous object
return -1;
}
}
int HeapRegionSeq::find_contiguous(size_t num) {
assert(num > 1, "otherwise we should not be calling this");
assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
err_msg("_alloc_search_start: %d should be valid and <= than %d",
_alloc_search_start, _regions.length()));
int start = _alloc_search_start;
int res = find_contiguous_from(start, num);
if (res == -1 && start != 0) {
// Try starting from the beginning. If _alloc_search_start was 0,
// no point in doing this again.
res = find_contiguous_from(0, num);
}
if (res != -1) {
assert(0 <= res && res < _regions.length(),
err_msg("res: %d should be valid", res));
_alloc_search_start = res + (int) num;
}
assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
err_msg("_alloc_search_start: %d should be valid",
_alloc_search_start));
return res; return res;
} }
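find_contiguous_from() and find_contiguous() above scan for num consecutive empty regions starting at a given index, and retry once from index 0 if the first scan fails. The same scan over a toy bitmap of empty regions, as a self-contained sketch (the std::vector<bool> representation is an assumption made for the example, not how HeapRegionSeq stores regions):

  #include <cstddef>
  #include <vector>

  int find_contiguous_from(const std::vector<bool>& empty, int from, size_t num) {
    int first = -1;
    size_t num_so_far = 0;
    for (int curr = from; curr < (int) empty.size() && num_so_far < num; ++curr) {
      if (empty[curr]) {
        if (first == -1) { first = curr; num_so_far = 1; }
        else             { num_so_far += 1; }
      } else {
        first = -1;                                  // run broken, start over
        num_so_far = 0;
      }
    }
    return num_so_far == num ? first : -1;
  }

  int find_contiguous(const std::vector<bool>& empty, int search_start, size_t num) {
    int res = find_contiguous_from(empty, search_start, num);
    if (res == -1 && search_start != 0) {
      res = find_contiguous_from(empty, 0, num);     // wrap around and retry once
    }
    return res;
  }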
@ -376,6 +284,10 @@ void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
size_t& num_regions_deleted) { size_t& num_regions_deleted) {
// Reset this in case it's currently pointing into the regions that
// we just removed.
_alloc_search_start = 0;
assert(shrink_bytes % os::vm_page_size() == 0, "unaligned"); assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned"); assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
@ -395,7 +307,6 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
} }
assert(cur == _regions.top(), "Should be top"); assert(cur == _regions.top(), "Should be top");
if (!cur->is_empty()) break; if (!cur->is_empty()) break;
cur->reset_zero_fill();
shrink_bytes -= cur->capacity(); shrink_bytes -= cur->capacity();
num_regions_deleted++; num_regions_deleted++;
_regions.pop(); _regions.pop();
@ -410,7 +321,6 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
return MemRegion(last_start, end); return MemRegion(last_start, end);
} }
class PrintHeapRegionClosure : public HeapRegionClosure { class PrintHeapRegionClosure : public HeapRegionClosure {
public: public:
bool doHeapRegion(HeapRegion* r) { bool doHeapRegion(HeapRegion* r) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,9 +41,9 @@ class HeapRegionSeq: public CHeapObj {
// (For efficiency only; private to obj_allocate after initialization.)
int _alloc_search_start;
// Attempts to allocate a block of the (assumed humongous) word_size,
// starting at the region "ind".
HeapWord* alloc_obj_from_region_index(int ind, size_t word_size);
// Finds a contiguous set of empty regions of length num, starting
// from a given index.
int find_contiguous_from(int from, size_t num);
// Currently, we're choosing collection sets in a round-robin fashion, // Currently, we're choosing collection sets in a round-robin fashion,
// starting here. // starting here.
@ -76,11 +76,8 @@ class HeapRegionSeq: public CHeapObj {
// that are available for allocation. // that are available for allocation.
size_t free_suffix(); size_t free_suffix();
// Requires "word_size" to be humongous (in the technical sense). If
// possible, allocates a contiguous subsequence of the heap regions to
// satisfy the allocation, and returns the address of the beginning of
// that sequence, otherwise returns NULL.
HeapWord* obj_allocate(size_t word_size);
// Finds a contiguous set of empty regions of length num.
int find_contiguous(size_t num);
// Apply the "doHeapRegion" method of "blk" to all regions in "this", // Apply the "doHeapRegion" method of "blk" to all regions in "this",
// in address order, terminating the iteration early // in address order, terminating the iteration early
@ -106,7 +103,7 @@ class HeapRegionSeq: public CHeapObj {
// If "addr" falls within a region in the sequence, return that region, // If "addr" falls within a region in the sequence, return that region,
// or else NULL. // or else NULL.
HeapRegion* addr_to_region(const void* addr);
inline HeapRegion* addr_to_region(const void* addr);
void print(); void print();

View File

@ -0,0 +1,438 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
size_t HeapRegionSetBase::_unrealistically_long_length = 0;
//////////////////// HeapRegionSetBase ////////////////////
void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len;
}
size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
assert(hr->startsHumongous(), "pre-condition");
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
assert(region_num > 0, "sanity");
return region_num;
}
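calculate_region_num() above is plain arithmetic: the stretched capacity of a "starts humongous" region divided by the fixed region size, expressed as a shift. A tiny standalone example of the same computation; the constants are made up for the illustration and are not HotSpot's GrainBytes values.

  #include <cassert>
  #include <cstddef>

  const size_t kLogRegionBytes = 20;                 // 1 MB regions (example value)
  const size_t kRegionBytes    = (size_t) 1 << kLogRegionBytes;

  size_t calculate_region_num(size_t stretched_capacity_bytes) {
    assert(stretched_capacity_bytes % kRegionBytes == 0);
    return stretched_capacity_bytes >> kLogRegionBytes;
  }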
void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
msg->append("[%s] %s "
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
name(), message, length(), region_num(),
total_capacity_bytes(), total_used_bytes());
fill_in_ext_msg_extra(msg);
}
bool HeapRegionSetBase::verify_region(HeapRegion* hr,
HeapRegionSetBase* expected_containing_set) {
const char* error_message = NULL;
if (!regions_humongous()) {
if (hr->isHumongous()) {
error_message = "the region should not be humongous";
}
} else {
if (!hr->isHumongous() || !hr->startsHumongous()) {
error_message = "the region should be 'starts humongous'";
}
}
if (!regions_empty()) {
if (hr->is_empty()) {
error_message = "the region should not be empty";
}
} else {
if (!hr->is_empty()) {
error_message = "the region should be empty";
}
}
#ifdef ASSERT
// The _containing_set field is only available when ASSERT is defined.
if (hr->containing_set() != expected_containing_set) {
error_message = "inconsistent containing set found";
}
#endif // ASSERT
const char* extra_error_message = verify_region_extra(hr);
if (extra_error_message != NULL) {
error_message = extra_error_message;
}
if (error_message != NULL) {
outputStream* out = tty;
out->cr();
out->print_cr("## [%s] %s", name(), error_message);
out->print_cr("## Offending Region: "PTR_FORMAT, hr);
out->print_cr(" "HR_FORMAT, HR_FORMAT_PARAMS(hr));
#ifdef ASSERT
out->print_cr(" containing set: "PTR_FORMAT, hr->containing_set());
#endif // ASSERT
out->print_cr("## Offending Region Set: "PTR_FORMAT, this);
print_on(out);
return false;
} else {
return true;
}
}
void HeapRegionSetBase::verify() {
// It's important that we also observe the MT safety protocol even
// for the verification calls. If we do verification without the
// appropriate locks and the set changes underneath our feet
// verification might fail and send us on a wild goose chase.
hrl_assert_mt_safety_ok(this);
guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && region_num() >= 0 &&
total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
hrl_ext_msg(this, "invariant"));
guarantee((!regions_humongous() && region_num() == length()) ||
( regions_humongous() && region_num() >= length()),
hrl_ext_msg(this, "invariant"));
guarantee(!regions_empty() || total_used_bytes() == 0,
hrl_ext_msg(this, "invariant"));
guarantee(total_used_bytes() <= total_capacity_bytes(),
hrl_ext_msg(this, "invariant"));
}
void HeapRegionSetBase::verify_start() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
assert(!_verify_in_progress,
hrl_ext_msg(this, "verification should not be in progress"));
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
_calc_length = 0;
_calc_region_num = 0;
_calc_total_capacity_bytes = 0;
_calc_total_used_bytes = 0;
_verify_in_progress = true;
}
void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
_calc_length += 1;
if (!hr->isHumongous()) {
_calc_region_num += 1;
} else {
_calc_region_num += calculate_region_num(hr);
}
_calc_total_capacity_bytes += hr->capacity();
_calc_total_used_bytes += hr->used();
}
void HeapRegionSetBase::verify_end() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length,
hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
"calc length: "SIZE_FORMAT,
name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num,
hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
"calc region num: "SIZE_FORMAT,
name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
"calc capacity bytes: "SIZE_FORMAT,
name(),
total_capacity_bytes(), _calc_total_capacity_bytes));
guarantee(total_used_bytes() == _calc_total_used_bytes,
hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
"calc used bytes: "SIZE_FORMAT,
name(), total_used_bytes(), _calc_total_used_bytes));
_verify_in_progress = false;
}
void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
out->cr();
out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
out->print_cr(" Region Assumptions");
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
out->print_cr(" Attributes");
out->print_cr(" length : "SIZE_FORMAT_W(14), length());
out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num());
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
total_capacity_bytes());
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
total_used_bytes());
}
void HeapRegionSetBase::clear() {
_length = 0;
_region_num = 0;
_total_used_bytes = 0;
}
HeapRegionSetBase::HeapRegionSetBase(const char* name)
: _name(name), _verify_in_progress(false),
_calc_length(0), _calc_region_num(0),
_calc_total_capacity_bytes(0), _calc_total_used_bytes(0) { }
//////////////////// HeapRegionSet ////////////////////
void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(proxy_set);
hrl_assert_sets_match(this, proxy_set);
verify_optional();
proxy_set->verify_optional();
if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length,
hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
"should be <= length: "SIZE_FORMAT,
name(), proxy_set->length(), _length));
_length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num,
hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num();
assert(proxy_set->total_used_bytes() <= _total_used_bytes,
hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), proxy_set->total_used_bytes(),
_total_used_bytes));
_total_used_bytes -= proxy_set->total_used_bytes();
proxy_set->clear();
verify_optional();
proxy_set->verify_optional();
}
//////////////////// HeapRegionLinkedList ////////////////////
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
}
void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(from_list);
verify_optional();
from_list->verify_optional();
if (from_list->is_empty()) return;
#ifdef ASSERT
HeapRegionLinkedListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from NULL to non-NULL or vice versa to catch bugs. So, we have
// to NULL it first before setting it to the value.
hr->set_containing_set(NULL);
hr->set_containing_set(this);
}
#endif // ASSERT
if (_tail != NULL) {
assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant"));
_tail->set_next(from_list->_head);
} else {
assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
_head = from_list->_head;
}
_tail = from_list->_tail;
_length += from_list->length();
_region_num += from_list->region_num();
_total_used_bytes += from_list->total_used_bytes();
from_list->clear();
verify_optional();
from_list->verify_optional();
}
void HeapRegionLinkedList::remove_all() {
hrl_assert_mt_safety_ok(this);
verify_optional();
HeapRegion* curr = _head;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
curr->set_next(NULL);
curr->set_containing_set(NULL);
curr = next;
}
clear();
verify_optional();
}
void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
hrl_assert_mt_safety_ok(this);
assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
verify_optional();
DEBUG_ONLY(size_t old_length = length();)
HeapRegion* curr = _head;
HeapRegion* prev = NULL;
size_t count = 0;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
if (curr->pending_removal()) {
assert(count < target_count,
hrl_err_msg("[%s] should not come across more regions "
"pending for removal than target_count: "SIZE_FORMAT,
name(), target_count));
if (prev == NULL) {
assert(_head == curr, hrl_ext_msg(this, "invariant"));
_head = next;
} else {
assert(_head != curr, hrl_ext_msg(this, "invariant"));
prev->set_next(next);
}
if (next == NULL) {
assert(_tail == curr, hrl_ext_msg(this, "invariant"));
_tail = prev;
} else {
assert(_tail != curr, hrl_ext_msg(this, "invariant"));
}
curr->set_next(NULL);
remove_internal(curr);
curr->set_pending_removal(false);
count += 1;
// If we have come across the target number of regions we can
// just bail out. However, for debugging purposes, we can just
// carry on iterating to make sure there are not more regions
// tagged with pending removal.
DEBUG_ONLY(if (count == target_count) break;)
} else {
prev = curr;
}
curr = next;
}
assert(count == target_count,
hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
"target_count: "SIZE_FORMAT, name(), count, target_count));
assert(length() + target_count == old_length,
hrl_err_msg("[%s] new length should be consistent "
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
"target_count: "SIZE_FORMAT,
name(), length(), old_length, target_count));
verify_optional();
}
void HeapRegionLinkedList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.
hrl_assert_mt_safety_ok(this);
// This will also do the basic verification too.
verify_start();
HeapRegion* curr = _head;
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
size_t count = 0;
while (curr != NULL) {
verify_next_region(curr);
count += 1;
guarantee(count < _unrealistically_long_length,
hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
"seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
name(), count, curr, prev0, prev1, length()));
prev1 = prev0;
prev0 = curr;
curr = curr->next();
}
guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
verify_end();
}
void HeapRegionLinkedList::clear() {
HeapRegionSetBase::clear();
_head = NULL;
_tail = NULL;
}
void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
HeapRegionSetBase::print_on(out, print_contents);
out->print_cr(" Linking");
out->print_cr(" head : "PTR_FORMAT, _head);
out->print_cr(" tail : "PTR_FORMAT, _tail);
if (print_contents) {
out->print_cr(" Contents");
HeapRegionLinkedListIterator iter(this);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
hr->print_on(out);
}
}
}
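The proxy-set mechanism implemented by update_from_proxy() above pairs with HeapRegionSet::remove_with_proxy() (declared in heapRegionSet.hpp). A minimal illustrative sketch, not part of the changeset, assuming the regions passed in are already members of the master set; the helper name, the regions_to_drop array, and the set names are hypothetical:
#include "gc_implementation/g1/heapRegionSets.hpp"
// Hypothetical helper: each worker accumulates its removals in a local
// proxy set, so the master set's fields are not updated concurrently.
static void drop_regions_with_proxy(HumongousRegionSet* master,
                                    HeapRegion** regions_to_drop,
                                    size_t count) {
  HumongousRegionSet proxy("Local Proxy Set");
  for (size_t i = 0; i < count; i += 1) {
    master->remove_with_proxy(regions_to_drop[i], &proxy);
  }
  // Later, a single (or properly synchronized) thread folds the
  // accumulated updates back into the master set in one step.
  master->update_from_proxy(&proxy);
}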

View File

@ -0,0 +1,346 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
#include "gc_implementation/g1/heapRegion.hpp"
// Large buffer for some cases where the output might be larger than normal.
#define HRL_ERR_MSG_BUFSZ 512
typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
// Set verification will be forced either if someone defines
// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
// asserts are compiled in.
#ifndef HEAP_REGION_SET_FORCE_VERIFY
#define HEAP_REGION_SET_FORCE_VERIFY defined(ASSERT)
#endif // HEAP_REGION_SET_FORCE_VERIFY
//////////////////// HeapRegionSetBase ////////////////////
// Base class for all the classes that represent heap region sets. It
// contains the basic attributes that each set needs to maintain
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
class hrl_ext_msg;
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class hrl_ext_msg;
protected:
static size_t calculate_region_num(HeapRegion* hr);
static size_t _unrealistically_long_length;
// The number of regions added to the set. If the set contains
// only humongous regions, this reflects only 'starts humongous'
// regions and does not include 'continues humongous' ones.
size_t _length;
// The total number of regions represented by the set. If the set
// does not contain humongous regions, this should be the same as
// _length. If the set contains only humongous regions, this will
// include the 'continues humongous' regions.
size_t _region_num;
// We don't keep track of the total capacity explicitly, we instead
// recalculate it based on _region_num and the heap region size.
// The sum of used bytes in all the regions in the set.
size_t _total_used_bytes;
const char* _name;
bool _verify_in_progress;
size_t _calc_length;
size_t _calc_region_num;
size_t _calc_total_capacity_bytes;
size_t _calc_total_used_bytes;
// verify_region() is used to ensure that the contents of a region
// added to / removed from a set are consistent. Different sets
// make different assumptions about the regions added to them. So
// each set can override verify_region_extra(), which is called
// from verify_region(), and do any extra verification it needs to
// perform in that.
virtual const char* verify_region_extra(HeapRegion* hr) { return NULL; }
bool verify_region(HeapRegion* hr,
HeapRegionSetBase* expected_containing_set);
// Indicates whether all regions in the set should be humongous or
// not. Only used during verification.
virtual bool regions_humongous() = 0;
// Indicates whether all regions in the set should be empty or
// not. Only used during verification.
virtual bool regions_empty() = 0;
// Subclasses can optionally override this to do MT safety protocol
// checks. It is called in an assert from all methods that perform
// updates on the set (and subclasses should also call it too).
virtual bool check_mt_safety() { return true; }
// fill_in_ext_msg() writes the values of the set's attributes
// in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
// allows subclasses to append further information.
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
// It updates the fields of the set to reflect hr being added to
// the set.
inline void update_for_addition(HeapRegion* hr);
// It updates the fields of the set to reflect hr being added to
// the set and tags the region appropriately.
inline void add_internal(HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set.
inline void update_for_removal(HeapRegion* hr);
// It updates the fields of the set to reflect hr being removed
// from the set and tags the region appropriately.
inline void remove_internal(HeapRegion* hr);
// It clears all the fields of the set. Note: it will not iterate
// over the set and remove regions from it. It assumes that the
// caller has already done so. It will literally just clear the fields.
virtual void clear();
HeapRegionSetBase(const char* name);
public:
static void set_unrealistically_long_length(size_t len);
const char* name() { return _name; }
size_t length() { return _length; }
bool is_empty() { return _length == 0; }
size_t region_num() { return _region_num; }
size_t total_capacity_bytes() {
return region_num() << HeapRegion::LogOfHRGrainBytes;
}
size_t total_used_bytes() { return _total_used_bytes; }
virtual void verify();
void verify_start();
void verify_next_region(HeapRegion* hr);
void verify_end();
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_optional() {
verify();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
virtual void print_on(outputStream* out, bool print_contents = false);
};
// Customized err_msg for heap region sets. Apart from an
// assert/guarantee-specific message it also prints out the values of
// the fields of the associated set. This can be very helpful in
// diagnosing failures.
class hrl_ext_msg : public hrl_err_msg {
public:
hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
set->fill_in_ext_msg(this, message);
}
};
// These two macros are provided for convenience, to keep the uses of
// these two asserts a bit more concise.
#define hrl_assert_mt_safety_ok(_set_) \
do { \
assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \
} while (0)
#define hrl_assert_region_ok(_set_, _hr_, _expected_) \
do { \
assert((_set_)->verify_region((_hr_), (_expected_)), \
hrl_ext_msg((_set_), "region verification")); \
} while (0)
//////////////////// HeapRegionSet ////////////////////
#define hrl_assert_sets_match(_set1_, _set2_) \
do { \
assert(((_set1_)->regions_humongous() == \
(_set2_)->regions_humongous()) && \
((_set1_)->regions_empty() == (_set2_)->regions_empty()), \
hrl_err_msg("the contents of set %s and set %s should match", \
(_set1_)->name(), (_set2_)->name())); \
} while (0)
// This class represents heap region sets whose members are not
// explicitly tracked. It's helpful to group regions using such sets
// so that we can reason about all the region groups in the heap using
// the same interface (namely, the HeapRegionSetBase API).
class HeapRegionSet : public HeapRegionSetBase {
protected:
virtual const char* verify_region_extra(HeapRegion* hr) {
if (hr->next() != NULL) {
return "next() should always be NULL as we do not link the regions";
}
return HeapRegionSetBase::verify_region_extra(hr);
}
HeapRegionSet(const char* name) : HeapRegionSetBase(name) {
clear();
}
public:
// It adds hr to the set. The region should not be a member of
// another set.
inline void add(HeapRegion* hr);
// It removes hr from the set. The region should be a member of
// this set.
inline void remove(HeapRegion* hr);
// It removes a region from the set. Instead of updating the fields
// of the set to reflect this removal, it accumulates the updates
// in proxy_set. The idea is that proxy_set is thread-local to
// avoid multiple threads updating the fields of the set
// concurrently and having to synchronize. The method
// update_from_proxy() will update the fields of the set from the
// proxy_set.
inline void remove_with_proxy(HeapRegion* hr, HeapRegionSet* proxy_set);
// After multiple calls to remove_with_proxy() the updates to the
// fields of the set are accumulated in proxy_set. This call
// updates the fields of the set from proxy_set.
void update_from_proxy(HeapRegionSet* proxy_set);
};
//////////////////// HeapRegionLinkedList ////////////////////
// A set that links all the regions added to it in a singly-linked
// list. We should try to avoid doing operations that iterate over
// such lists in performance critical paths. Typically we should
// add / remove one region at a time or concatenate two lists. All
// those operations are done in constant time.
class HeapRegionLinkedListIterator;
class HeapRegionLinkedList : public HeapRegionSetBase {
friend class HeapRegionLinkedListIterator;
private:
HeapRegion* _head;
HeapRegion* _tail;
// These are provided for use by the friend classes.
HeapRegion* head() { return _head; }
HeapRegion* tail() { return _tail; }
protected:
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
HeapRegionLinkedList(const char* name) : HeapRegionSetBase(name) {
clear();
}
public:
// It adds hr to the list as the new tail. The region should not be
// a member of another set.
inline void add_as_tail(HeapRegion* hr);
// It removes and returns the head of the list. It assumes that the
// list is not empty so it will return a non-NULL value.
inline HeapRegion* remove_head();
// Convenience method.
inline HeapRegion* remove_head_or_null();
// It moves the regions from from_list to this list and empties
// from_list. The new regions will appear in the same order as they
// were in from_list and be linked in the end of this list.
void add_as_tail(HeapRegionLinkedList* from_list);
// It empties the list by removing all regions from it.
void remove_all();
// It removes all regions in the list that are pending for removal
// (i.e., they have been tagged with "pending_removal"). The list
// must not be empty, target_count should reflect the exact number
// of regions that are pending for removal in the list, and
// target_count should be > 1 (currently, we never need to remove a
// single region using this).
void remove_all_pending(size_t target_count);
virtual void verify();
virtual void print_on(outputStream* out, bool print_contents = false);
};
//////////////////// HeapRegionLinkedList ////////////////////
// Iterator class that provides a convenient way to iterate over the
// regions in a HeapRegionLinkedList instance.
class HeapRegionLinkedListIterator : public StackObj {
private:
HeapRegionLinkedList* _list;
HeapRegion* _curr;
public:
bool more_available() {
return _curr != NULL;
}
HeapRegion* get_next() {
assert(more_available(),
"get_next() should be called when more regions are available");
// If we are going to introduce a count in the iterator we should
// do the "cycle" check.
HeapRegion* hr = _curr;
assert(_list->verify_region(hr, _list), "region verification");
_curr = hr->next();
return hr;
}
HeapRegionLinkedListIterator(HeapRegionLinkedList* list)
: _curr(NULL), _list(list) {
_curr = list->head();
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
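A minimal usage sketch for the iterator declared above (illustrative only; the helper name is hypothetical, and the per-region work simply sums HeapRegion::free(), which is assumed from heapRegion.hpp):
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
// Hypothetical helper: walk a linked list of regions without modifying it.
static size_t free_bytes_in_list(HeapRegionLinkedList* list) {
  size_t total_free = 0;
  HeapRegionLinkedListIterator iter(list);
  while (iter.more_available()) {
    HeapRegion* hr = iter.get_next();
    total_free += hr->free();
  }
  return total_free;
}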

View File

@ -0,0 +1,159 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
#include "gc_implementation/g1/heapRegionSet.hpp"
//////////////////// HeapRegionSetBase ////////////////////
inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
// Assumes the caller has already verified the region.
_length += 1;
if (!hr->isHumongous()) {
_region_num += 1;
} else {
_region_num += calculate_region_num(hr);
}
_total_used_bytes += hr->used();
}
inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, NULL);
assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
update_for_addition(hr);
hr->set_containing_set(this);
}
inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
// Assumes the caller has already verified the region.
assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
_length -= 1;
size_t region_num_diff;
if (!hr->isHumongous()) {
region_num_diff = 1;
} else {
region_num_diff = calculate_region_num(hr);
}
assert(region_num_diff <= _region_num,
hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), region_num_diff, _region_num));
_region_num -= region_num_diff;
size_t used_bytes = hr->used();
assert(used_bytes <= _total_used_bytes,
hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), used_bytes, _total_used_bytes));
_total_used_bytes -= used_bytes;
}
inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, this);
assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
update_for_removal(hr);
}
//////////////////// HeapRegionSet ////////////////////
inline void HeapRegionSet::add(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
// add_internal() will verify the region.
add_internal(hr);
}
inline void HeapRegionSet::remove(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
// remove_internal() will verify the region.
remove_internal(hr);
}
inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
HeapRegionSet* proxy_set) {
// No need to do the MT safety check here given that this method
// does not update the contents of the set but instead accumulates
// the changes in proxy_set which is assumed to be thread-local.
hrl_assert_sets_match(this, proxy_set);
hrl_assert_region_ok(this, hr, this);
hr->set_containing_set(NULL);
proxy_set->update_for_addition(hr);
}
//////////////////// HeapRegionLinkedList ////////////////////
inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrl_ext_msg(this, "invariant"));
// add_internal() will verify the region.
add_internal(hr);
// Now link the region.
if (_tail != NULL) {
_tail->set_next(hr);
} else {
_head = hr;
}
_tail = hr;
}
inline HeapRegion* HeapRegionLinkedList::remove_head() {
hrl_assert_mt_safety_ok(this);
assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrl_ext_msg(this, "invariant"));
// We need to unlink it first.
HeapRegion* hr = _head;
_head = hr->next();
if (_head == NULL) {
_tail = NULL;
}
hr->set_next(NULL);
// remove_internal() will verify the region.
remove_internal(hr);
return hr;
}
inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
hrl_assert_mt_safety_ok(this);
if (!is_empty()) {
return remove_head();
} else {
return NULL;
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
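The inline methods above keep the linking fields (_head, _tail) and the base-class statistics in sync. A short illustrative sketch of the intended add/remove discipline, assuming a FreeRegionList from heapRegionSets.hpp and ignoring the locking its MT safety protocol would normally require; the helper name is hypothetical:
#include "gc_implementation/g1/heapRegionSets.hpp"
// Hypothetical helper: return one region to the free list and take one back.
static HeapRegion* return_and_take(FreeRegionList* free_list,
                                   HeapRegion* returned) {
  // The region being added must not belong to another set and must be unlinked.
  free_list->add_as_tail(returned);
  // remove_head() requires a non-empty list; remove_head_or_null() is the
  // tolerant variant and may return NULL.
  return free_list->remove_head_or_null();
}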

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
//////////////////// FreeRegionList ////////////////////
const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
if (hr->is_young()) {
return "the region should not be young";
}
// The superclass will check that the region is empty and
// not-humongous.
return HeapRegionLinkedList::verify_region_extra(hr);
}
//////////////////// MasterFreeRegionList ////////////////////
bool MasterFreeRegionList::check_mt_safety() {
// Master Free List MT safety protocol:
// (a) If we're at a safepoint, operations on the master free list
// should be invoked by either the VM thread (which will serialize
// them) or by the GC workers while holding the
// FreeList_lock.
// (b) If we're not at a safepoint, operations on the master free
// list should be invoked while holding the Heap_lock.
guarantee((SafepointSynchronize::is_at_safepoint() &&
(Thread::current()->is_VM_thread() ||
FreeList_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
//////////////////// SecondaryFreeRegionList ////////////////////
bool SecondaryFreeRegionList::check_mt_safety() {
// Secondary Free List MT safety protocol:
// Operations on the secondary free list should always be invoked
// while holding the SecondaryFreeList_lock.
guarantee(SecondaryFreeList_lock->owned_by_self(),
hrl_ext_msg(this, "secondary free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
//////////////////// HumongousRegionSet ////////////////////
const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
if (hr->is_young()) {
return "the region should not be young";
}
// The superclass will check that the region is not empty and
// humongous.
return HeapRegionSet::verify_region_extra(hr);
}
//////////////////// MasterHumongousRegionSet ////////////////////
bool MasterHumongousRegionSet::check_mt_safety() {
// Master Humongous Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master humongous
// set should be invoked by either the VM thread (which will
// serialize them) or by the GC workers while holding the
// OldSets_lock.
// (b) If we're not at a safepoint, operations on the master
// humongous set should be invoked while holding the Heap_lock.
guarantee((SafepointSynchronize::is_at_safepoint() &&
(Thread::current()->is_VM_thread() ||
OldSets_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master humongous set MT safety protocol"));
return HumongousRegionSet::check_mt_safety();
}
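A minimal sketch of honoring protocol (b) above when not at a safepoint (illustrative only; the helper name is hypothetical and the MutexLocker-based locking is assumed from runtime/mutexLocker.hpp):
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "runtime/mutexLocker.hpp"
// Hypothetical helper: outside a safepoint, the master free list may only
// be touched while holding the Heap_lock, which check_mt_safety() verifies.
static HeapRegion* take_from_master_free_list(MasterFreeRegionList* master_free_list) {
  MutexLocker ml(Heap_lock);
  return master_free_list->remove_head_or_null();
}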

View File

@ -0,0 +1,86 @@
/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
//////////////////// FreeRegionList ////////////////////
class FreeRegionList : public HeapRegionLinkedList {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool regions_humongous() { return false; }
virtual bool regions_empty() { return true; }
public:
FreeRegionList(const char* name) : HeapRegionLinkedList(name) { }
};
//////////////////// MasterFreeRegionList ////////////////////
class MasterFreeRegionList : public FreeRegionList {
protected:
virtual bool check_mt_safety();
public:
MasterFreeRegionList(const char* name) : FreeRegionList(name) { }
};
//////////////////// SecondaryFreeRegionList ////////////////////
class SecondaryFreeRegionList : public FreeRegionList {
protected:
virtual bool check_mt_safety();
public:
SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
};
//////////////////// HumongousRegionSet ////////////////////
class HumongousRegionSet : public HeapRegionSet {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool regions_humongous() { return true; }
virtual bool regions_empty() { return false; }
public:
HumongousRegionSet(const char* name) : HeapRegionSet(name) { }
};
//////////////////// MasterHumongousRegionSet ////////////////////
class MasterHumongousRegionSet : public HumongousRegionSet {
protected:
virtual bool check_mt_safety();
public:
MasterHumongousRegionSet(const char* name) : HumongousRegionSet(name) { }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
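For reference, a hedged sketch of how these concrete set types might be held by value by an owning object (the owner class and the set names below are assumptions for illustration, not the actual fields added elsewhere in this changeset):
#include "gc_implementation/g1/heapRegionSets.hpp"
// Hypothetical owner: the sets are value objects, so they are embedded
// by value and just need a name at construction time.
class ExampleRegionSetOwner {
  MasterFreeRegionList     _free_list;
  SecondaryFreeRegionList  _secondary_free_list;
  MasterHumongousRegionSet _humongous_set;
public:
  ExampleRegionSetOwner()
    : _free_list("Example Free List"),
      _secondary_free_list("Example Secondary Free List"),
      _humongous_set("Example Humongous Set") { }
};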

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,8 +38,8 @@
# include "thread_windows.inline.hpp" # include "thread_windows.inline.hpp"
#endif #endif
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) : PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
_qset(qset_), _buf(NULL), _index(0), _active(active), _qset(qset), _buf(NULL), _index(0), _active(active),
_perm(perm), _lock(NULL) _perm(perm), _lock(NULL)
{} {}
@ -153,10 +153,16 @@ void PtrQueueSet::reduce_free_list() {
} }
void PtrQueue::handle_zero_index() { void PtrQueue::handle_zero_index() {
assert(0 == _index, "Precondition."); assert(_index == 0, "Precondition.");
// This thread records the full buffer and allocates a new one (while // This thread records the full buffer and allocates a new one (while
// holding the lock if there is one). // holding the lock if there is one).
if (_buf != NULL) { if (_buf != NULL) {
if (!should_enqueue_buffer()) {
assert(_index > 0, "the buffer can only be re-used if it's not full");
return;
}
if (_lock) { if (_lock) {
assert(_lock->owned_by_self(), "Required."); assert(_lock->owned_by_self(), "Required.");

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ protected:
public: public:
// Initialize this queue to contain a null buffer, and be part of the // Initialize this queue to contain a null buffer, and be part of the
// given PtrQueueSet. // given PtrQueueSet.
PtrQueue(PtrQueueSet*, bool perm = false, bool active = false); PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
// Release any contained resources. // Release any contained resources.
void flush(); void flush();
// Calls flush() when destroyed. // Calls flush() when destroyed.
@ -85,6 +85,14 @@ public:
else enqueue_known_active(ptr); else enqueue_known_active(ptr);
} }
// This method is called when we're doing the zero index handling
// and gives a chance to the queues to do any pre-enqueueing
// processing they might want to do on the buffer. It should return
// true if the buffer should be enqueued, or false if enough
// entries were cleared from it so that it can be re-used. It should
// not return false if the buffer is still full (otherwise we can
// get into an infinite loop).
virtual bool should_enqueue_buffer() { return true; }
void handle_zero_index(); void handle_zero_index();
void locking_enqueue_completed_buffer(void** buf); void locking_enqueue_completed_buffer(void** buf);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,98 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/satbQueue.hpp" #include "gc_implementation/g1/satbQueue.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "memory/sharedHeap.hpp" #include "memory/sharedHeap.hpp"
#include "runtime/mutexLocker.hpp" #include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp" #include "runtime/thread.hpp"
// This method removes entries from an SATB buffer that will not be
// useful to the concurrent marking threads. An entry is removed if it
// satisfies one of the following conditions:
//
// * it points to an object outside the G1 heap (G1's concurrent
// marking only visits objects inside the G1 heap),
// * it points to an object that has been allocated since marking
// started (according to SATB those objects do not need to be
// visited during marking), or
// * it points to an object that has already been marked (no need to
// process it again).
//
// The rest of the entries will be retained and are compacted towards
// the top of the buffer. If with this filtering we clear a large
// enough chunk of the buffer we can re-use it (instead of enqueueing
// it) and we can just allow the mutator to carry on executing.
bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
// A value of 0 means "don't filter SATB buffers".
if (G1SATBBufferEnqueueingThresholdPercent == 0) {
return true;
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(_index == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
void** buf = _buf;
size_t sz = _sz;
// Used for sanity checking at the end of the loop.
debug_only(size_t entries = 0; size_t retained = 0;)
size_t i = sz;
size_t new_index = sz;
// Given that we are expecting _index == 0, we could have changed
// the loop condition to (i > 0). But we are using _index for
// generality.
while (i > _index) {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
debug_only(entries += 1;)
oop* p = (oop*) &buf[byte_index_to_index((int) i)];
oop obj = *p;
// NULL the entry so that unused parts of the buffer contain NULLs
// at the end. If we are going to retain it we will copy it to its
// final place. If we have retained all entries we have visited so
// far, we'll just end up copying it to the same place.
*p = NULL;
bool retain = g1h->is_obj_ill(obj);
if (retain) {
assert(new_index > 0, "we should not have already filled up the buffer");
new_index -= oopSize;
assert(new_index >= i,
"new_index should never be below i, as we always compact 'up'");
oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
assert(new_p >= p, "the destination location should never be below "
"the source as we always compact 'up'");
assert(*new_p == NULL,
"we should have already cleared the destination location");
*new_p = obj;
debug_only(retained += 1;)
}
}
size_t entries_calc = (sz - _index) / oopSize;
assert(entries == entries_calc, "the number of entries we counted "
"should match the number of entries we calculated");
size_t retained_calc = (sz - new_index) / oopSize;
assert(retained == retained_calc, "the number of retained entries we counted "
"should match the number of retained entries we calculated");
size_t perc = retained_calc * 100 / entries_calc;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
_index = new_index;
return should_enqueue;
}
void ObjPtrQueue::apply_closure(ObjectClosure* cl) { void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
if (_buf != NULL) { if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz); apply_closure_to_buffer(cl, _buf, _index, _sz);
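A worked example of the enqueueing decision above (illustrative numbers only; it assumes a 4 KB buffer of 8-byte entries and a G1SATBBufferEnqueueingThresholdPercent of 60):
//   entries_calc  = (4096 - 0) / 8   = 512 entries scanned
//   retained_calc = 128              (entries still interesting to marking)
//   perc          = 128 * 100 / 512  = 25
//   25 > 60 is false, so should_enqueue_buffer() returns false, _index is
//   moved up to the first retained entry, and the buffer is re-used by the
//   mutator instead of being enqueued for the marking threads.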

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -33,13 +33,18 @@ class JavaThread;
// A ptrQueue whose elements are "oops", pointers to object heads. // A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue { class ObjPtrQueue: public PtrQueue {
public: public:
ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) : ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
// SATB queues are only active during marking cycles. We create // SATB queues are only active during marking cycles. We create
// them with their active field set to false. If a thread is // them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated // created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active // before the thread starts running, we'll need to set its active
// field to true. This is done in JavaThread::initialize_queues(). // field to true. This is done in JavaThread::initialize_queues().
PtrQueue(qset_, perm, false /* active */) { } PtrQueue(qset, perm, false /* active */) { }
// Overrides PtrQueue::should_enqueue_buffer(). See the method's
// definition for more information.
virtual bool should_enqueue_buffer();
// Apply the closure to all elements, and reset the index to make the // Apply the closure to all elements, and reset the index to make the
// buffer empty. // buffer empty.
void apply_closure(ObjectClosure* cl); void apply_closure(ObjectClosure* cl);

View File

@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
} }
void VM_G1CollectForAllocation::doit() { void VM_G1CollectForAllocation::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded); _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
assert(_result == NULL || _pause_succeeded, assert(_result == NULL || _pause_succeeded,
@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
} }
void VM_G1CollectFull::doit() { void VM_G1CollectFull::doit() {
JvmtiGCFullMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, _gc_cause); GCCauseSetter x(g1h, _gc_cause);
g1h->do_full_collection(false /* clear_all_soft_refs */); g1h->do_full_collection(false /* clear_all_soft_refs */);
@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
} }
void VM_G1IncCollectionPause::doit() { void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(!_should_initiate_conc_mark || assert(!_should_initiate_conc_mark ||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||

View File

@ -258,6 +258,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
BiasedLocking::restore_marks(); BiasedLocking::restore_marks();
Threads::gc_epilogue(); Threads::gc_epilogue();
CodeCache::gc_epilogue(); CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

View File

@ -1054,6 +1054,7 @@ void PSParallelCompact::post_compact()
Threads::gc_epilogue(); Threads::gc_epilogue();
CodeCache::gc_epilogue(); CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

View File

@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
} }
void VM_ParallelGCFailedAllocation::doit() { void VM_ParallelGCFailedAllocation::doit() {
JvmtiGCForAllocationMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::MINOR);
notify_gc_begin(false);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap"); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
if (_result == NULL && GC_locker::is_active_and_needs_gc()) { if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
notify_gc_end();
} }
VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size, VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
} }
void VM_ParallelGCFailedPermanentAllocation::doit() { void VM_ParallelGCFailedPermanentAllocation::doit() {
JvmtiGCFullMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::FULL);
notify_gc_begin(true);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap"); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
if (_result == NULL && GC_locker::is_active_and_needs_gc()) { if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
notify_gc_end();
} }
// Only used for System.gc() calls // Only used for System.gc() calls
@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
} }
void VM_ParallelGCSystemGC::doit() { void VM_ParallelGCSystemGC::doit() {
JvmtiGCFullMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::FULL);
notify_gc_begin(true);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
} else { } else {
heap->invoke_full_gc(false); heap->invoke_full_gc(false);
} }
notify_gc_end();
} }

View File

@ -31,7 +31,6 @@
#include "memory/oopFactory.hpp" #include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp" #include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp" #include "oops/instanceRefKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
#include "runtime/init.hpp" #include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
@ -40,6 +39,7 @@
#ifndef SERIALGC #ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif #endif
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool); HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end); HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
void VM_GenCollectForAllocation::doit() { void VM_GenCollectForAllocation::doit() {
JvmtiGCForAllocationMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::MINOR);
notify_gc_begin(false);
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, _gc_cause); GCCauseSetter gccs(gch, _gc_cause);
@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
if (_res == NULL && GC_locker::is_active_and_needs_gc()) { if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
notify_gc_end();
} }
void VM_GenCollectFull::doit() { void VM_GenCollectFull::doit() {
JvmtiGCFullMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::FULL);
notify_gc_begin(true);
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, _gc_cause); GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
notify_gc_end();
} }
void VM_GenCollectForPermanentAllocation::doit() { void VM_GenCollectForPermanentAllocation::doit() {
JvmtiGCForAllocationMarker jgcm; SvcGCMarker sgcm(SvcGCMarker::FULL);
notify_gc_begin(true);
SharedHeap* heap = (SharedHeap*)Universe::heap(); SharedHeap* heap = (SharedHeap*)Universe::heap();
GCCauseSetter gccs(heap, _gc_cause); GCCauseSetter gccs(heap, _gc_cause);
switch (heap->kind()) { switch (heap->kind()) {
@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
if (_res == NULL && GC_locker::is_active_and_needs_gc()) { if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked(); set_gc_locked();
} }
notify_gc_end();
} }

View File

@ -30,6 +30,7 @@
#include "runtime/jniHandles.hpp" #include "runtime/jniHandles.hpp"
#include "runtime/synchronizer.hpp" #include "runtime/synchronizer.hpp"
#include "runtime/vm_operations.hpp" #include "runtime/vm_operations.hpp"
#include "prims/jvmtiExport.hpp"
// The following class hierarchy represents // The following class hierarchy represents
// a set of operations (VM_Operation) related to GC. // a set of operations (VM_Operation) related to GC.
@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
HeapWord* result() const { return _res; } HeapWord* result() const { return _res; }
}; };
class DTraceGCProbeMarker : public StackObj { class SvcGCMarker : public StackObj {
public: private:
DTraceGCProbeMarker(bool full) { JvmtiGCMarker _jgcm;
VM_GC_Operation::notify_gc_begin(full); public:
typedef enum { MINOR, FULL, OTHER } reason_type;
SvcGCMarker(reason_type reason ) {
VM_GC_Operation::notify_gc_begin(reason == FULL);
} }
~DTraceGCProbeMarker() { ~SvcGCMarker() {
VM_GC_Operation::notify_gc_end(); VM_GC_Operation::notify_gc_end();
} }
}; };

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,30 +34,6 @@
// Implementation of Bytecode // Implementation of Bytecode
bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
assert(Bytecodes::can_rewrite(code), "post-check only");
// Some codes are conditionally rewriting. Look closely at them.
switch (code) {
case Bytecodes::_aload_0:
// Even if RewriteFrequentPairs is turned on,
// the _aload_0 code might delay its rewrite until
// a following _getfield rewrites itself.
return false;
case Bytecodes::_lookupswitch:
return false; // the rewrite is not done by the interpreter
case Bytecodes::_new:
// (Could actually look at the class here, but the profit would be small.)
return false; // the rewrite is not always done
}
// No other special cases.
return true;
}
#ifdef ASSERT #ifdef ASSERT
void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const { void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
@ -188,17 +164,16 @@ int Bytecode_member_ref::index() const {
// Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
// at the same time it allocates per-call-site CP cache entries. // at the same time it allocates per-call-site CP cache entries.
Bytecodes::Code rawc = code(); Bytecodes::Code rawc = code();
Bytecode* invoke = bytecode(); if (has_index_u4(rawc))
if (invoke->has_index_u4(rawc)) return get_index_u4(rawc);
return invoke->get_index_u4(rawc);
else else
return invoke->get_index_u2_cpcache(rawc); return get_index_u2_cpcache(rawc);
} }
int Bytecode_member_ref::pool_index() const { int Bytecode_member_ref::pool_index() const {
int index = this->index(); int index = this->index();
DEBUG_ONLY({ DEBUG_ONLY({
if (!bytecode()->has_index_u4(code())) if (!has_index_u4(code()))
index -= constantPoolOopDesc::CPCACHE_INDEX_TAG; index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
}); });
return _method->constants()->cache()->entry_at(index)->constant_pool_index(); return _method->constants()->cache()->entry_at(index)->constant_pool_index();
@ -214,13 +189,12 @@ void Bytecode_field::verify() const {
// Implementation of Bytecode_loadconstant // Implementation of Bytecode_loadconstant
int Bytecode_loadconstant::raw_index() const { int Bytecode_loadconstant::raw_index() const {
Bytecode* bcp = bytecode(); Bytecodes::Code rawc = code();
Bytecodes::Code rawc = bcp->code();
assert(rawc != Bytecodes::_wide, "verifier prevents this"); assert(rawc != Bytecodes::_wide, "verifier prevents this");
if (Bytecodes::java_code(rawc) == Bytecodes::_ldc) if (Bytecodes::java_code(rawc) == Bytecodes::_ldc)
return bcp->get_index_u1(rawc); return get_index_u1(rawc);
else else
return bcp->get_index_u2(rawc, false); return get_index_u2(rawc, false);
} }
int Bytecode_loadconstant::pool_index() const { int Bytecode_loadconstant::pool_index() const {
@ -258,7 +232,7 @@ void Bytecode_lookupswitch::verify() const {
case Bytecodes::_lookupswitch: case Bytecodes::_lookupswitch:
{ int i = number_of_pairs() - 1; { int i = number_of_pairs() - 1;
while (i-- > 0) { while (i-- > 0) {
assert(pair_at(i)->match() < pair_at(i+1)->match(), "unsorted table entries"); assert(pair_at(i).match() < pair_at(i+1).match(), "unsorted table entries");
} }
} }
break; break;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,14 +38,20 @@
# include "bytes_zero.hpp" # include "bytes_zero.hpp"
#endif #endif
// Base class for different kinds of abstractions working class ciBytecodeStream;
// relative to an objects 'this' pointer.
// The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative
// to the bcp.
class Bytecode: public StackObj {
protected:
const address _bcp;
const Bytecodes::Code _code;
class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
public:
// Address computation // Address computation
address addr_at (int offset) const { return (address)this + offset; } address addr_at (int offset) const { return (address)_bcp + offset; }
int byte_at (int offset) const { return *(addr_at(offset)); } u_char byte_at(int offset) const { return *addr_at(offset); }
address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); } address aligned_addr_at (int offset) const { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); } int aligned_offset (int offset) const { return aligned_addr_at(offset) - addr_at(0); }
@ -54,31 +60,20 @@ class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); } int get_Java_u4_at (int offset) const { return Bytes::get_Java_u4(addr_at(offset)); }
int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); } int get_native_u2_at (int offset) const { return Bytes::get_native_u2(addr_at(offset)); }
int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); } int get_native_u4_at (int offset) const { return Bytes::get_native_u4(addr_at(offset)); }
};
// The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative
// to an objects 'this' pointer.
// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
class Bytecode: public ThisRelativeObj {
protected:
u_char byte_at(int offset) const { return *addr_at(offset); }
bool check_must_rewrite(Bytecodes::Code bc) const;
public: public:
Bytecode(methodOop method, address bcp): _bcp(bcp), _code(Bytecodes::code_at(method, addr_at(0))) {
assert(method != NULL, "this form requires a valid methodOop");
}
// Defined in ciStreams.hpp
inline Bytecode(const ciBytecodeStream* stream, address bcp = NULL);
// Attributes // Attributes
address bcp() const { return addr_at(0); } address bcp() const { return _bcp; }
int instruction_size() const { return Bytecodes::length_at(bcp()); } int instruction_size() const { return Bytecodes::length_for_code_at(_code, bcp()); }
// Warning: Use code() with caution on live bytecode streams. 4926272 Bytecodes::Code code() const { return _code; }
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
bool must_rewrite(Bytecodes::Code code) const { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
// Creation
inline friend Bytecode* Bytecode_at(address bcp);
// Static functions for parsing bytecodes in place. // Static functions for parsing bytecodes in place.
int get_index_u1(Bytecodes::Code bc) const { int get_index_u1(Bytecodes::Code bc) const {
@ -89,7 +84,7 @@ class Bytecode: public ThisRelativeObj {
assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide); assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
address p = addr_at(is_wide ? 2 : 1); address p = addr_at(is_wide ? 2 : 1);
if (can_use_native_byte_order(bc, is_wide)) if (can_use_native_byte_order(bc, is_wide))
return Bytes::get_native_u2(p); return Bytes::get_native_u2(p);
else return Bytes::get_Java_u2(p); else return Bytes::get_Java_u2(p);
} }
int get_index_u1_cpcache(Bytecodes::Code bc) const { int get_index_u1_cpcache(Bytecodes::Code bc) const {
@ -138,20 +133,17 @@ class Bytecode: public ThisRelativeObj {
} }
}; };
-inline Bytecode* Bytecode_at(address bcp) {
-  // Warning: Use with caution on live bytecode streams.  4926272
-  return (Bytecode*)bcp;
-}

// Abstraction for lookupswitch bytecode
-class LookupswitchPair: ThisRelativeObj {
+class LookupswitchPair VALUE_OBJ_CLASS_SPEC {
 private:
-  int _match;
-  int _offset;
+  const address _bcp;
+  address addr_at        (int offset) const      { return _bcp + offset; }
+  int     get_Java_u4_at (int offset) const      { return Bytes::get_Java_u4(addr_at(offset)); }

 public:
+  LookupswitchPair(address bcp): _bcp(bcp) {}
  int  match() const                             { return get_Java_u4_at(0 * jintSize); }
  int  offset() const                            { return get_Java_u4_at(1 * jintSize); }
};

@@ -159,26 +151,25 @@ class LookupswitchPair: ThisRelativeObj {
class Bytecode_lookupswitch: public Bytecode {
 public:
+  Bytecode_lookupswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
+  // Defined in ciStreams.hpp
+  inline Bytecode_lookupswitch(const ciBytecodeStream* stream);
  void verify() const PRODUCT_RETURN;

  // Attributes
  int default_offset() const                     { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
  int number_of_pairs() const                    { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
-  LookupswitchPair* pair_at(int i) const         { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
-                                                   return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
-  // Creation
-  inline friend Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp);
+  LookupswitchPair pair_at(int i) const          {
+    assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
+    return LookupswitchPair(aligned_addr_at(1 + (1 + i)*2*jintSize));
+  }
};

-inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) {
-  Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

class Bytecode_tableswitch: public Bytecode {
 public:
+  Bytecode_tableswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
+  // Defined in ciStreams.hpp
+  inline Bytecode_tableswitch(const ciBytecodeStream* stream);
  void verify() const PRODUCT_RETURN;

  // Attributes
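For orientation (hypothetical caller, not taken from this commit): pair_at() now returns a LookupswitchPair by value instead of a pointer into the code stream. A sketch of walking the cases, assuming m is a methodOop and bcp points at a _lookupswitch opcode inside it:

    Bytecode_lookupswitch sw(m, bcp);              // the constructor runs verify()
    const int bci = m->bci_from(bcp);
    for (int i = 0; i < sw.number_of_pairs(); i++) {
      LookupswitchPair pair = sw.pair_at(i);       // small value wrapping the pair's address
      int key    = pair.match();
      int target = bci + pair.offset();            // branch offsets are relative to the switch bci
    }
    int fall_through = bci + sw.default_offset();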
@@ -187,52 +178,36 @@ class Bytecode_tableswitch: public Bytecode {
  int high_key() const                           { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
  int dest_offset_at(int i) const;
  int length()                                   { return high_key()-low_key()+1; }
-  // Creation
-  inline friend Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp);
};

-inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) {
-  Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

// Common code for decoding invokes and field references.
-class Bytecode_member_ref: public ResourceObj {
+class Bytecode_member_ref: public Bytecode {
 protected:
-  methodHandle _method;                          // method containing the bytecode
-  int          _bci;                             // position of the bytecode
+  const methodHandle _method;                    // method containing the bytecode

-  Bytecode_member_ref(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+  Bytecode_member_ref(methodHandle method, int bci)  : Bytecode(method(), method()->bcp_from(bci)), _method(method) {}
+  methodHandle method() const                    { return _method; }

 public:
-  // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
  int          index() const;                    // cache index (loaded from instruction)
  int          pool_index() const;               // constant pool index
  symbolOop    name() const;                     // returns the name of the method or field
  symbolOop    signature() const;                // returns the signature of the method or field
  BasicType    result_type(Thread* thread) const; // returns the result type of the getfield or invoke
-  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
-  Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
};

// Abstraction for invoke_{virtual, static, interface, special}
class Bytecode_invoke: public Bytecode_member_ref {
 protected:
-  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
+  // Constructor that skips verification
+  Bytecode_invoke(methodHandle method, int bci, bool unused)  : Bytecode_member_ref(method, bci) {}

 public:
+  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
  void verify() const;

  // Attributes
@@ -253,31 +228,20 @@ class Bytecode_invoke: public Bytecode_member_ref {
          is_invokespecial()   ||
          is_invokedynamic(); }

-  // Creation
-  inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
-  // Like Bytecode_invoke_at. Instead it returns NULL if the bci is not at an invoke.
-  inline friend Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci);
+  // Helper to skip verification.  Used is_valid() to check if the result is really an invoke
+  inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci);
};

-inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
-  Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-inline Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci) {
-  Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  return b->is_valid() ? b : NULL;
+inline Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci) {
+  return Bytecode_invoke(method, bci, false);
}
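Illustrative only: the NULL-returning Bytecode_invoke_at_check() helper is gone; the by-value Bytecode_invoke_check() skips verification and the caller asks the result whether it really is an invoke. A sketch, assuming mh is a methodHandle and bci may or may not sit on an invoke instruction:

    Bytecode_invoke call = Bytecode_invoke_check(mh, bci);
    if (call.is_valid()) {                         // replaces the old 'result != NULL' test
      symbolOop callee_name      = call.name();
      symbolOop callee_signature = call.signature();
    }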
// Abstraction for all field accesses (put/get field/static)
class Bytecode_field: public Bytecode_member_ref {
- protected:
-  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
 public:
+  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
  // Testers
  bool is_getfield() const                       { return java_code() == Bytecodes::_getfield; }
  bool is_putfield() const                       { return java_code() == Bytecodes::_putfield; }
@@ -292,131 +256,64 @@ class Bytecode_field: public Bytecode_member_ref {
          is_getstatic()  ||
          is_putstatic(); }
  void verify() const;
-  // Creation
-  inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci);
};

-inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) {
-  Bytecode_field* b = new Bytecode_field(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}

// Abstraction for checkcast
class Bytecode_checkcast: public Bytecode {
 public:
+  Bytecode_checkcast(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
  void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }

  // Returns index
  long index() const   { return get_index_u2(Bytecodes::_checkcast); };
-  // Creation
-  inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
};

-inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
-  Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

// Abstraction for instanceof
class Bytecode_instanceof: public Bytecode {
 public:
+  Bytecode_instanceof(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
  void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }

  // Returns index
  long index() const   { return get_index_u2(Bytecodes::_instanceof); };
-  // Creation
-  inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
};

-inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
-  Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

class Bytecode_new: public Bytecode {
 public:
+  Bytecode_new(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
  void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }

  // Returns index
  long index() const   { return get_index_u2(Bytecodes::_new); };
-  // Creation
-  inline friend Bytecode_new* Bytecode_new_at(address bcp);
};

-inline Bytecode_new* Bytecode_new_at(address bcp) {
-  Bytecode_new* b = (Bytecode_new*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

class Bytecode_multianewarray: public Bytecode {
 public:
+  Bytecode_multianewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
  void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check new"); }

  // Returns index
  long index() const   { return get_index_u2(Bytecodes::_multianewarray); };
-  // Creation
-  inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
};

-inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
-  Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

class Bytecode_anewarray: public Bytecode {
 public:
+  Bytecode_anewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
  void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }

  // Returns index
  long index() const   { return get_index_u2(Bytecodes::_anewarray); };
-  // Creation
-  inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
};

-inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
-  Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}

// Abstraction for ldc, ldc_w and ldc2_w
-class Bytecode_loadconstant: public ResourceObj {
+class Bytecode_loadconstant: public Bytecode {
 private:
-  int          _bci;
-  methodHandle _method;
-  Bytecodes::Code code() const                   { return bytecode()->code(); }
+  const methodHandle _method;

  int raw_index() const;

-  Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {}

 public:
-  // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+  Bytecode_loadconstant(methodHandle method, int bci): Bytecode(method(), method->bcp_from(bci)), _method(method) { verify(); }

  void verify() const {
    assert(_method.not_null(), "must supply method");
@@ -437,15 +334,6 @@ class Bytecode_loadconstant: public ResourceObj {
  BasicType result_type() const;                 // returns the result type of the ldc
  oop resolve_constant(TRAPS) const;
-  // Creation
-  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci);
};

-inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) {
-  Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}

#endif // SHARE_VM_INTERPRETER_BYTECODE_HPP
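A hypothetical use of the reworked ldc helper, not from the commit: Bytecode_loadconstant is now constructed directly (verifying in its constructor) instead of being resource-allocated by Bytecode_loadconstant_at(); the bci/bcp bookkeeping it used to duplicate now comes from the Bytecode base class.

    Bytecode_loadconstant ldc(mh, bci);              // mh: methodHandle, bci at an ldc/ldc_w/ldc2_w
    BasicType t = ldc.result_type();                 // e.g. T_INT, T_OBJECT
    oop value   = ldc.resolve_constant(CHECK_NULL);  // assumes an enclosing TRAPS function returning a pointer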


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -831,11 +831,11 @@ BytecodeInterpreter::run(interpreterState istate) {
      // much like trying to deopt at a poll return. In that has we simply
      // get out of here
      //
-      if ( Bytecodes::code_at(pc, METHOD) == Bytecodes::_return_register_finalizer) {
+      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
-      UPDATE_PC(Bytecodes::length_at(pc));
+      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;
      goto run;
    }
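The pattern in this hunk recurs throughout the changeset: the Bytecodes accessors now take the method first, so they can check that the bcp really lies inside it (see the new Bytecodes::check_method() further down). Sketch of the call-site rewrite, with METHOD and pc as used in the interpreter loop above:

    // old shapes: Bytecodes::code_at(pc), Bytecodes::code_at(pc, METHOD), Bytecodes::length_at(pc)
    Bytecodes::Code c = Bytecodes::code_at(METHOD, pc);
    int len           = Bytecodes::length_at(METHOD, pc);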


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@ void BaseBytecodeStream::assert_raw_index_size(int size) const {
    // in raw mode, pretend indy is "bJJ__"
    assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
  } else {
-    bytecode()->assert_index_size(size, raw_code(), is_wide());
+    bytecode().assert_index_size(size, raw_code(), is_wide());
  }
}


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -105,14 +105,14 @@ class BaseBytecodeStream: StackObj {
  bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }

  address         bcp() const                    { return method()->code_base() + _bci; }
-  Bytecode*       bytecode() const               { return Bytecode_at(bcp()); }
+  Bytecode        bytecode() const               { return Bytecode(_method(), bcp()); }

  // State changes
  void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }

  // Bytecode-specific attributes
-  int             dest() const                   { return bci() + bytecode()->get_offset_s2(raw_code()); }
-  int             dest_w() const                 { return bci() + bytecode()->get_offset_s4(raw_code()); }
+  int             dest() const                   { return bci() + bytecode().get_offset_s2(raw_code()); }
+  int             dest_w() const                 { return bci() + bytecode().get_offset_s4(raw_code()); }

  // One-byte indices.
  int             get_index_u1() const           { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
@@ -189,7 +189,7 @@ class BytecodeStream: public BaseBytecodeStream {
    } else {
      // get bytecode
      address bcp = this->bcp();
-      raw_code = Bytecodes::code_at(bcp);
+      raw_code = Bytecodes::code_at(_method(), bcp);
      code = Bytecodes::java_code(raw_code);
      // set next bytecode position
      //
@@ -197,7 +197,7 @@ class BytecodeStream: public BaseBytecodeStream {
      // tty bytecode otherwise the stepping is wrong!
      // (carefull: length_for(...) must be used first!)
      int l = Bytecodes::length_for(code);
-      if (l == 0) l = Bytecodes::length_at(bcp);
+      if (l == 0) l = Bytecodes::length_at(_method(), bcp);
      _next_bci  += l;
      assert(_bci < _next_bci, "length must be > 0");
      // set attributes
@@ -219,16 +219,16 @@ class BytecodeStream: public BaseBytecodeStream {
  Bytecodes::Code code() const                   { return _code; }

  // Unsigned indices, widening
-  int             get_index() const              { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
+  int             get_index() const              { return is_wide() ? bytecode().get_index_u2(raw_code(), true) : get_index_u1(); }
  // Get an unsigned 2-byte index, swapping the bytes if necessary.
  int             get_index_u2() const           { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u2(raw_code(), false); }
+                                                   return bytecode().get_index_u2(raw_code(), false); }
  // Get an unsigned 2-byte index in native order.
  int             get_index_u2_cpcache() const   { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u2_cpcache(raw_code()); }
+                                                   return bytecode().get_index_u2_cpcache(raw_code()); }
  int             get_index_u4() const           { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u4(raw_code()); }
-  bool            has_index_u4() const           { return bytecode()->has_index_u4(raw_code()); }
+                                                   return bytecode().get_index_u4(raw_code()); }
+  bool            has_index_u4() const           { return bytecode().has_index_u4(raw_code()); }
};

#endif // SHARE_VM_INTERPRETER_BYTECODESTREAM_HPP
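Sketch of a typical stream walk (assumed caller, not part of the diff): bytecode() now hands back a Bytecode by value, so the stream can be used without pointer casts into the code array; mh is assumed to be a methodHandle.

    BytecodeStream bcs(mh);
    Bytecodes::Code c;
    while ((c = bcs.next()) >= 0) {                // a negative code means the end of the method
      if (c == Bytecodes::_invokevirtual) {
        int cp_cache_index = bcs.get_index_u2_cpcache();
      }
    }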


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -100,9 +100,9 @@ class BytecodePrinter: public BytecodeClosure {
    Bytecodes::Code code;
    if (is_wide()) {
      // bcp wasn't advanced if previous bytecode was _wide.
-      code = Bytecodes::code_at(bcp+1);
+      code = Bytecodes::code_at(method(), bcp+1);
    } else {
-      code = Bytecodes::code_at(bcp);
+      code = Bytecodes::code_at(method(), bcp);
    }
    _code = code;
    int bci = bcp - method->code_base();
@@ -127,11 +127,11 @@ class BytecodePrinter: public BytecodeClosure {
  void trace(methodHandle method, address bcp, outputStream* st) {
    _current_method = method();
    ResourceMark rm;
-    Bytecodes::Code code = Bytecodes::code_at(bcp);
+    Bytecodes::Code code = Bytecodes::code_at(method(), bcp);
    // Set is_wide
    _is_wide = (code == Bytecodes::_wide);
    if (is_wide()) {
-      code = Bytecodes::code_at(bcp+1);
+      code = Bytecodes::code_at(method(), bcp+1);
    }
    _code = code;
    int bci = bcp - method->code_base();


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,18 +54,46 @@ u_char Bytecodes::_lengths [Bytecodes::number_of_codes];
Bytecodes::Code Bytecodes::_java_code     [Bytecodes::number_of_codes];
u_short         Bytecodes::_flags         [(1<<BitsPerByte)*2];

+#ifdef ASSERT
+bool Bytecodes::check_method(const methodOopDesc* method, address bcp) {
+  return method->contains(bcp);
+}
+#endif

+bool Bytecodes::check_must_rewrite(Bytecodes::Code code) {
+  assert(can_rewrite(code), "post-check only");
+  // Some codes are conditionally rewriting.  Look closely at them.
+  switch (code) {
+  case Bytecodes::_aload_0:
+    // Even if RewriteFrequentPairs is turned on,
+    // the _aload_0 code might delay its rewrite until
+    // a following _getfield rewrites itself.
+    return false;
+  case Bytecodes::_lookupswitch:
+    return false;  // the rewrite is not done by the interpreter
+  case Bytecodes::_new:
+    // (Could actually look at the class here, but the profit would be small.)
+    return false;  // the rewrite is not always done
+  }
+  // No other special cases.
+  return true;
+}

Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
-  return code_at(method->bcp_from(bci), method);
+  return code_at(method, method->bcp_from(bci));
}

-Bytecodes::Code Bytecodes::non_breakpoint_code_at(address bcp, methodOop method) {
-  if (method == NULL)  method = methodOopDesc::method_from_bcp(bcp);
+Bytecodes::Code Bytecodes::non_breakpoint_code_at(const methodOopDesc* method, address bcp) {
+  assert(method != NULL, "must have the method for breakpoint conversion");
+  assert(method->contains(bcp), "must be valid bcp in method");
  return method->orig_bytecode_at(method->bci_from(bcp));
}

-int Bytecodes::special_length_at(address bcp, address end) {
-  Code code = code_at(bcp);
+int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end) {
  switch (code) {
    case _wide:
      if (end != NULL && bcp + 1 >= end) {
@@ -120,7 +148,7 @@ int Bytecodes::raw_special_length_at(address bcp, address end) {
  if (code == _breakpoint) {
    return 1;
  } else {
-    return special_length_at(bcp, end);
+    return special_length_at(code, bcp, end);
  }
}
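Context for the signature change (hypothetical caller): special_length_at() no longer re-decodes the opcode from bcp; the caller supplies the code it already has, exactly as raw_special_length_at() now does above. A sketch of sizing a variable-length instruction, where method, bcp and end (the code limit, possibly NULL) are assumed to be in scope:

    Bytecodes::Code code = Bytecodes::code_at(method, bcp);
    int len = Bytecodes::length_for(code);                   // fixed-length bytecodes
    if (len == 0) {
      len = Bytecodes::special_length_at(code, bcp, end);    // tableswitch, lookupswitch, wide
    }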

Some files were not shown because too many files have changed in this diff.