commit c1cefe0e75
Author: J. Duke
Date:   2017-07-05 18:00:09 +02:00

176 changed files with 8752 additions and 5061 deletions


@ -142,3 +142,4 @@ a4f28069d44a379cda99dd1d921d19f819726d22 jdk8-b15
7010bd24cdd07bc7daef80702f39124854dec36c jdk8-b18
237bc29afbfc6f56a4fe4a6008e2befb59c44bac jdk8-b19
5a5eaf6374bcbe23530899579fed17a05b7705f3 jdk8-b20
cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21


@ -142,3 +142,4 @@ e59c47de1ad8982ff3b0e843773a6902b36c2337 jdk8-b14
312cf15d16577ef198b033d2a4cc0a52369b7343 jdk8-b18
e1366c5d84ef984095a332bcee70b3938232d07d jdk8-b19
51d8b6cb18c0978ecfa4f33e1537d35ee01b69fa jdk8-b20
f157fc2a71a38ce44007a6f18d5b011824dce705 jdk8-b21


@ -209,3 +209,5 @@ a2fef924d8e6f37dac2a887315e3502876cc8e24 hs23-b08
4bcf61041217f8677dcec18e90e9196acc945bba hs23-b09
9232e0ecbc2cec54dcc8f93004fb00c214446460 jdk8-b19
fe2c8764998112b7fefcd7d41599714813ae4327 jdk8-b20
9952d1c439d64c5fd4ad1236a63a62bd5a49d4c3 jdk8-b21
513351373923f74a7c91755748b95c9771e59f96 hs23-b10


@ -39,9 +39,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc


@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=23
HS_MINOR_VER=0
HS_BUILD_NUMBER=09
HS_BUILD_NUMBER=10
JDK_MAJOR_VER=1
JDK_MINOR_VER=8


@ -39,9 +39,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc


@ -40,9 +40,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc


@ -53,6 +53,17 @@ CPP_INCLUDE_DIRS=\
/I "$(WorkSpace)\src\os\windows\vm" \
/I "$(WorkSpace)\src\cpu\$(Platform_arch)\vm"
!if "$(Platform_arch_model)" == "$(Platform_arch)"
SOURCES_AD=\
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
!else
SOURCES_AD=\
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
!endif
# NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
# and ProjectCreatorIDEOptions in projectcreator.make.
GENERATED_NAMES=\
@ -105,7 +116,6 @@ $(GENERATED_NAMES_IN_DIR): $(Platform_arch_model).ad adlc.exe
$(ADLC) $(ADLCFLAGS) $(Platform_arch_model).ad
mv $(GENERATED_NAMES) $(AdlcOutDir)/
$(Platform_arch_model).ad: $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
$(Platform_arch_model).ad: $(SOURCES_AD)
rm -f $(Platform_arch_model).ad
cat $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad >$(Platform_arch_model).ad
cat $(SOURCES_AD) >$(Platform_arch_model).ad


@ -3036,10 +3036,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label* L_failure,
Label* L_slow_path,
RegisterOrConstant super_check_offset) {
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
bool need_slow_path = (must_load_sco ||
@ -3159,10 +3157,8 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
assert(label_nulls <= 1, "at most one NULL in the batch");
// a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_supers_offset_in_bytes());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int ss_offset = in_bytes(Klass::secondary_supers_offset());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
// Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
@ -3336,7 +3332,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
@ -3413,7 +3409,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
@ -3443,7 +3439,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
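
A recurring pattern in this changeset: call sites stop computing Klass field offsets by hand as klassOopDesc::header_size() * HeapWordSize + Klass::xxx_offset_in_bytes() and instead use new accessors such as Klass::secondary_super_cache_offset(), which return a header-relative ByteSize that is unwrapped with in_bytes() at the point of use. The standalone C++ sketch below models only that idiom, with simplified stand-in types and made-up offsets rather than HotSpot's real headers; the same substitution repeats in the C1 assembler, Runtime1, stub generator, and template interpreter hunks further down.

#include <iostream>

// Simplified stand-ins, not HotSpot's actual classes: they only model how a
// typed ByteSize accessor replaces hand-added "header words + field offset".
class ByteSize {
  int _value;
 public:
  explicit ByteSize(int v) : _value(v) {}
  friend int in_bytes(ByteSize b) { return b._value; }
};

struct KlassSketch {
  static constexpr int header_bytes = 16;  // hypothetical header size, illustration only
  static ByteSize secondary_super_cache_offset() { return ByteSize(header_bytes + 24); }
  static ByteSize super_check_offset_offset()    { return ByteSize(header_bytes + 32); }
};

int main() {
  // New style: the accessor folds the header in once, so every call site is a
  // single in_bytes(...) instead of repeating the header arithmetic.
  int sc_offset  = in_bytes(KlassSketch::secondary_super_cache_offset());
  int sco_offset = in_bytes(KlassSketch::super_check_offset_offset());
  std::cout << "sc=" << sc_offset << " sco=" << sco_offset << "\n";
  return 0;
}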


@ -302,7 +302,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(_obj != noreg, "must be a valid register");
assert(_oop_index >= 0, "must have oop index");
__ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
__ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
__ ld_ptr(G3, in_bytes(instanceKlass::init_thread_offset()), G3);
__ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
// load_klass patches may execute the patched code before it's
@ -471,7 +471,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ ld(ref_type_adr, tmp_reg);
// _reference_type field is of type ReferenceType (enum)


@ -2202,8 +2202,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(dst, tmp);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
__ lduw(tmp, lh_offset, tmp2);
@ -2238,12 +2237,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov(length, len);
__ load_klass(dst, tmp);
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
__ ld_ptr(tmp, ek_offset, super_k);
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ lduw(super_k, sco_offset, chk_off);
__ call_VM_leaf(tmp, copyfunc_addr);
@ -2455,8 +2452,8 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
op->obj()->as_register() == O0 &&
op->klass()->as_register() == G5, "must be");
if (op->init_check()) {
__ ld(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
__ ldub(op->klass()->as_register(),
in_bytes(instanceKlass::init_state_offset()),
op->tmp1()->as_register());
add_debug_info_for_null_check_here(op->stub()->info());
__ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
@ -2627,7 +2624,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
need_slow_path = false;
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
@ -2731,7 +2728,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(value, klass_RInfo);
// get instance klass
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
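
Alongside the offset cleanup, several hunks here and in the Runtime1 and template-interpreter files below narrow the instanceKlass::init_state access from a 32-bit ld/cmpl to a byte-sized ldub/cmpb, which only stays correct because the field itself is a single byte. A small sketch (hypothetical layout, not the real instanceKlass) of why the access width has to match the field width:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical layout, for illustration only: _init_state is one byte, so a
// four-byte read at its offset would also pull in the neighbouring fields.
struct KlassSketch {
  uint8_t  _init_state;   // e.g. fully_initialized
  uint8_t  _flags;
  uint16_t _misc;
};

int main() {
  KlassSketch k = {4, 0xFF, 0xBEEF};
  const uint8_t* base = reinterpret_cast<const uint8_t*>(&k);

  uint8_t byte_load;      // what ldub/cmpb observes
  std::memcpy(&byte_load, base + offsetof(KlassSketch, _init_state), sizeof byte_load);

  uint32_t word_load;     // what a 4-byte ld/cmpl at the same offset would observe
  std::memcpy(&word_load, base + offsetof(KlassSketch, _init_state), sizeof word_load);

  std::printf("byte load: %u, word load: 0x%08X\n",
              static_cast<unsigned>(byte_load), static_cast<unsigned>(word_load));
  return 0;
}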


@ -181,7 +181,7 @@ void C1_MacroAssembler::try_allocate(
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len, t1, t2);
if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes(), t1);
ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else {
set((intx)markOopDesc::prototype(), t1);
}
@ -252,7 +252,7 @@ void C1_MacroAssembler::initialize_object(
#ifdef ASSERT
{
Label ok;
ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1);
ld(klass, in_bytes(Klass::layout_helper_offset()), t1);
if (var_size_in_bytes != noreg) {
cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok);
} else {


@ -398,14 +398,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
__ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
__ ldub(G5_klass, in_bytes(instanceKlass::init_state_offset()), G3_t1);
__ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
}
#ifdef ASSERT
// assert object can be fast path allocated
{
Label ok, not_ok;
__ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
// make sure it's an instance (LH > 0)
__ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
__ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
@ -425,7 +425,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
// get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
@ -437,7 +437,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
__ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
@ -471,8 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register G4_length = G4; // Incoming
Register O0_obj = O0; // Outgoing
Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
+ Klass::layout_helper_offset_in_bytes()));
Address klass_lh(G5_klass, Klass::layout_helper_offset());
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
// Use this offset to pick out an individual byte of the layout_helper:
@ -592,7 +591,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer;
Register t = O1;
__ load_klass(O0, t);
__ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
__ ld(t, in_bytes(Klass::access_flags_offset()), t);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ andcc(G3, t, G0);
__ br(Assembler::notZero, false, Assembler::pt, register_finalizer);


@ -766,7 +766,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
// for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
__ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
@ -1173,7 +1173,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ btst(JVM_ACC_SYNCHRONIZED, O1);
__ br( Assembler::zero, false, Assembler::pt, done);
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ delayed()->btst(JVM_ACC_STATIC, O1);
__ ld_ptr(XXX_STATE(_locals), O1);
__ br( Assembler::zero, true, Assembler::pt, got_obj);


@ -1098,7 +1098,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick


@ -6773,6 +6773,16 @@ instruct unnecessary_membar_volatile() %{
ins_pipe(empty);
%}
instruct membar_storestore() %{
match(MemBarStoreStore);
ins_cost(0);
size(0);
format %{ "!MEMBAR-storestore (empty encoding)" %}
ins_encode( );
ins_pipe(empty);
%}
//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
match(Set dst (RoundDouble dst));
@ -9273,6 +9283,7 @@ instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
match(Jump switch_val);
effect(TEMP table);
ins_cost(350);
@ -10263,24 +10274,24 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
match(Set pcc (FastLock object box));
effect(KILL scratch, TEMP scratch2);
effect(TEMP scratch2, USE_KILL box, KILL scratch);
ins_cost(100);
format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
ins_encode( Fast_Lock(object, box, scratch, scratch2) );
ins_pipe(long_memory_op);
%}
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
match(Set pcc (FastUnlock object box));
effect(KILL scratch, TEMP scratch2);
effect(TEMP scratch2, USE_KILL box, KILL scratch);
ins_cost(100);
format %{ "FASTUNLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
ins_pipe(long_memory_op);
%}
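
The new membar_storestore instruct above gives the matcher an encoding for the MemBarStoreStore ideal node; on SPARC the encoding is empty (the hardware already keeps stores in order), but the node itself expresses that earlier stores must not be reordered past later ones. A rough C++ analogue of that ordering (illustration only, not HotSpot code), publishing an object only after its fields are written:

#include <atomic>
#include <thread>
#include <cstdio>

// The release store to 'published' plays the role of a store-store barrier:
// the payload writes above it cannot be reordered after it.
struct Payload { int a = 0; int b = 0; };

Payload data;
std::atomic<bool> published{false};

void producer() {
  data.a = 1;
  data.b = 2;
  published.store(true, std::memory_order_release);
}

void consumer() {
  while (!published.load(std::memory_order_acquire)) { /* spin */ }
  std::printf("%d %d\n", data.a, data.b);  // sees 1 2 once published is true
}

int main() {
  std::thread t1(producer);
  std::thread t2(consumer);
  t1.join();
  t2.join();
  return 0;
}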


@ -3046,8 +3046,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);
@ -3194,15 +3193,13 @@ class StubGenerator: public StubCodeGenerator {
G4_dst_klass, G3_src_klass);
// Generate the type check.
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ lduw(G4_dst_klass, sco_offset, sco_temp);
generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
O5_temp, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
// the checkcast_copy loop needs two extra arguments:
__ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass
@ -3414,6 +3411,9 @@ class StubGenerator: public StubCodeGenerator {
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
G5_method_type, G3_method_handle);
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
}
@ -3427,7 +3427,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
StubRoutines::_handler_for_unsafe_access_entry =
generate_handler_for_unsafe_access();


@ -366,7 +366,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object to O0
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ btst(JVM_ACC_STATIC, O0);
__ br( Assembler::zero, true, Assembler::pt, done);
__ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
@ -396,7 +396,6 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
Register Rscratch,
Register Rscratch2) {
const int page_size = os::vm_page_size();
Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
Label after_frame_check;
assert_different_registers(Rframe_size, Rscratch, Rscratch2);
@ -436,11 +435,19 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// the bottom of the stack
__ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);
// Save the return address as the exception pc
__ st_ptr(O7, saved_exception_pc);
// the stack will overflow, throw an exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
// Note that SP is restored to sender's sp (in the delay slot). This
// is necessary if the sender's frame is an extended compiled frame
// (see gen_c2i_adapter()) and safer anyway in case of JSR292
// adaptations.
// Note also that the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
__ jump_to(stub, Rscratch);
__ delayed()->mov(O5_savedSP, SP);
// if you get to here, then there is enough stack space
__ bind( after_frame_check );
@ -984,7 +991,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end)
__ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
// for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
__ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);


@ -888,7 +888,7 @@ void TemplateTable::aastore() {
// do fast instanceof cache test
__ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);
__ ld_ptr(O4, in_bytes(objArrayKlass::element_klass_offset()), O4);
assert(Otos_i == O0, "just checking");
@ -2031,7 +2031,7 @@ void TemplateTable::_return(TosState state) {
__ access_local_ptr(G3_scratch, Otos_i);
__ load_klass(Otos_i, O2);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
__ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
__ andcc(G3, O2, G0);
Label skip_register_finalizer;
__ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
@ -3350,13 +3350,13 @@ void TemplateTable::_new() {
__ ld_ptr(Rscratch, Roffset, RinstanceKlass);
// make sure klass is fully initialized:
__ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
__ ldub(RinstanceKlass, in_bytes(instanceKlass::init_state_offset()), G3_scratch);
__ cmp(G3_scratch, instanceKlass::fully_initialized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
__ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// get instance_size in instanceKlass (already aligned)
//__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
//__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class
__ btst(Klass::_lh_instance_slow_path_bit, Roffset);
@ -3483,7 +3483,7 @@ void TemplateTable::_new() {
__ bind(initialize_header);
if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
__ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else {
__ set((intptr_t)markOopDesc::prototype(), G4_scratch);
}

File diff suppressed because it is too large.


@ -503,7 +503,31 @@ class Assembler : public AbstractAssembler {
REX_WR = 0x4C,
REX_WRB = 0x4D,
REX_WRX = 0x4E,
REX_WRXB = 0x4F
REX_WRXB = 0x4F,
VEX_3bytes = 0xC4,
VEX_2bytes = 0xC5
};
enum VexPrefix {
VEX_B = 0x20,
VEX_X = 0x40,
VEX_R = 0x80,
VEX_W = 0x80
};
enum VexSimdPrefix {
VEX_SIMD_NONE = 0x0,
VEX_SIMD_66 = 0x1,
VEX_SIMD_F3 = 0x2,
VEX_SIMD_F2 = 0x3
};
enum VexOpcode {
VEX_OPCODE_NONE = 0x0,
VEX_OPCODE_0F = 0x1,
VEX_OPCODE_0F_38 = 0x2,
VEX_OPCODE_0F_3A = 0x3
};
enum WhichOperand {
@ -546,12 +570,99 @@ private:
void prefixq(Address adr);
void prefix(Address adr, Register reg, bool byteinst = false);
void prefixq(Address adr, Register reg);
void prefix(Address adr, XMMRegister reg);
void prefixq(Address adr, Register reg);
void prefixq(Address adr, XMMRegister reg);
void prefetch_prefix(Address src);
void rex_prefix(Address adr, XMMRegister xreg,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
int rex_prefix_and_encode(int dst_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
int nds_enc, VexSimdPrefix pre, VexOpcode opc,
bool vector256);
void vex_prefix(Address adr, int nds_enc, int xreg_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, bool vector256 = false) {
vex_prefix(src, nds->encoding(), dst->encoding(),
pre, VEX_OPCODE_0F, false, vector256);
}
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, bool vector256 = false) {
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
pre, VEX_OPCODE_0F, false, vector256);
}
void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, bool vector256 = false);
void simd_prefix(XMMRegister dst, Address src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
simd_prefix(dst, xnoreg, src, pre, opc);
}
void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
simd_prefix(src, dst, pre);
}
void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre) {
bool rex_w = true;
simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, bool vector256 = false);
int simd_prefix_and_encode(XMMRegister dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(dst, xnoreg, src, pre, opc);
}
// Move/convert 32-bit integer value.
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre) {
// It is OK to cast from Register to XMMRegister to pass argument here
// since only encoding is used in simd_prefix_and_encode() and number of
// Gen and Xmm registers are the same.
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
}
int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
return simd_prefix_and_encode(dst, xnoreg, src, pre);
}
int simd_prefix_and_encode(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
}
// Move/convert 64-bit integer value.
int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre) {
bool rex_w = true;
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
}
int simd_prefix_and_encode_q(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
bool rex_w = true;
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
}
// Helper functions for groups of instructions
void emit_arith_b(int op1, int op2, Register dst, int imm8);
@ -764,6 +875,7 @@ private:
void addss(XMMRegister dst, Address src);
void addss(XMMRegister dst, XMMRegister src);
void andl(Address dst, int32_t imm32);
void andl(Register dst, int32_t imm32);
void andl(Register dst, Address src);
void andl(Register dst, Register src);
@ -774,9 +886,11 @@ private:
void andq(Register dst, Register src);
// Bitwise Logical AND of Packed Double-Precision Floating-Point Values
void andpd(XMMRegister dst, Address src);
void andpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical AND of Packed Single-Precision Floating-Point Values
void andps(XMMRegister dst, XMMRegister src);
void bsfl(Register dst, Register src);
void bsrl(Register dst, Register src);
@ -837,9 +951,11 @@ private:
// Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void comisd(XMMRegister dst, Address src);
void comisd(XMMRegister dst, XMMRegister src);
// Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
void comiss(XMMRegister dst, Address src);
void comiss(XMMRegister dst, XMMRegister src);
// Identify processor type and features
void cpuid() {
@ -849,14 +965,19 @@ private:
// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, Address src);
// Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
void cvtsi2sdl(XMMRegister dst, Register src);
void cvtsi2sdl(XMMRegister dst, Address src);
void cvtsi2sdq(XMMRegister dst, Register src);
void cvtsi2sdq(XMMRegister dst, Address src);
// Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
void cvtsi2ssl(XMMRegister dst, Register src);
void cvtsi2ssl(XMMRegister dst, Address src);
void cvtsi2ssq(XMMRegister dst, Register src);
void cvtsi2ssq(XMMRegister dst, Address src);
// Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
void cvtdq2pd(XMMRegister dst, XMMRegister src);
@ -866,6 +987,7 @@ private:
// Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, Address src);
// Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
void cvttsd2sil(Register dst, Address src);
@ -1140,8 +1262,6 @@ private:
void movdq(Register dst, XMMRegister src);
// Move Aligned Double Quadword
void movdqa(Address dst, XMMRegister src);
void movdqa(XMMRegister dst, Address src);
void movdqa(XMMRegister dst, XMMRegister src);
// Move Unaligned Double Quadword
@ -1261,10 +1381,18 @@ private:
void orq(Register dst, Address src);
void orq(Register dst, Register src);
// Pack with unsigned saturation
void packuswb(XMMRegister dst, XMMRegister src);
void packuswb(XMMRegister dst, Address src);
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
// SSE4.1 packed move
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
#endif
@ -1292,6 +1420,7 @@ private:
// POR - Bitwise logical OR
void por(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, Address src);
// Shuffle Packed Doublewords
void pshufd(XMMRegister dst, XMMRegister src, int mode);
@ -1313,6 +1442,11 @@ private:
// Interleave Low Bytes
void punpcklbw(XMMRegister dst, XMMRegister src);
void punpcklbw(XMMRegister dst, Address src);
// Interleave Low Doublewords
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckldq(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
@ -1429,6 +1563,13 @@ private:
void xchgq(Register reg, Address adr);
void xchgq(Register dst, Register src);
// Get Value of Extended Control Register
void xgetbv() {
emit_byte(0x0F);
emit_byte(0x01);
emit_byte(0xD0);
}
void xorl(Register dst, int32_t imm32);
void xorl(Register dst, Address src);
void xorl(Register dst, Register src);
@ -1437,14 +1578,44 @@ private:
void xorq(Register dst, Register src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, Address src);
void xorpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
void xorps(XMMRegister dst, Address src);
void xorps(XMMRegister dst, XMMRegister src);
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
// AVX 3-operands instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vdivss(XMMRegister dst, XMMRegister nds, Address src);
void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmulss(XMMRegister dst, XMMRegister nds, Address src);
void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
protected:
// Next instructions require address alignment 16 bytes SSE mode.
// They should be called only from corresponding MacroAssembler instructions.
void andpd(XMMRegister dst, Address src);
void andps(XMMRegister dst, Address src);
void xorpd(XMMRegister dst, Address src);
void xorps(XMMRegister dst, Address src);
};
@ -2175,9 +2346,15 @@ class MacroAssembler: public Assembler {
void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
void andpd(XMMRegister dst, AddressLiteral src);
void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
void andps(XMMRegister dst, AddressLiteral src);
void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
void comiss(XMMRegister dst, AddressLiteral src);
void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
void comisd(XMMRegister dst, AddressLiteral src);
@ -2211,62 +2388,62 @@ private:
void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
void movss(XMMRegister dst, AddressLiteral src);
void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
void movlpd(XMMRegister dst, AddressLiteral src);
public:
void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); }
void addsd(XMMRegister dst, AddressLiteral src);
void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); }
void addss(XMMRegister dst, AddressLiteral src);
void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); }
void divsd(XMMRegister dst, AddressLiteral src);
void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
void divss(XMMRegister dst, AddressLiteral src);
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
void movsd(XMMRegister dst, AddressLiteral src);
void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); }
void mulsd(XMMRegister dst, AddressLiteral src);
void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); }
void mulss(XMMRegister dst, AddressLiteral src);
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); }
void sqrtsd(XMMRegister dst, AddressLiteral src);
void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); }
void sqrtss(XMMRegister dst, AddressLiteral src);
void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); }
void subsd(XMMRegister dst, AddressLiteral src);
void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); }
void subss(XMMRegister dst, AddressLiteral src);
void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, AddressLiteral src);
void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
@ -2279,6 +2456,53 @@ public:
void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, AddressLiteral src);
// AVX 3-operands instructions
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandpd(dst, nds, src); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandps(dst, nds, src); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
// Data
void cmov32( Condition cc, Register dst, Address src);
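
The assembler header above adds the VEX_2bytes/VEX_3bytes markers, the VexSimdPrefix and VexOpcode enums, and the vex_prefix*/simd_prefix* helpers that emit AVX's three-operand forms. As a rough sketch of what the 2-byte form packs (a made-up minimal helper, not the new HotSpot routines): 0xC5 is followed by one byte carrying inverted REX.R, the inverted extra-source register number (vvvv), the vector-length bit L, and the SIMD-prefix selector pp, with pp values matching the VexSimdPrefix enum above.

#include <cstdint>
#include <cstdio>

// Second byte of a 2-byte VEX prefix (0xC5 <byte>): bit 7 = ~REX.R,
// bits 6:3 = ~vvvv (the extra source register), bit 2 = L, bits 1:0 = pp.
uint8_t vex2_second_byte(bool rex_r, int nds_enc, bool vector256, int pp) {
  uint8_t b = static_cast<uint8_t>(pp & 0x3);           // 0=none, 1=0x66, 2=0xF3, 3=0xF2
  if (vector256) b |= 0x04;                             // L = 1 selects 256-bit vectors
  b |= static_cast<uint8_t>(((~nds_enc) & 0xF) << 3);   // vvvv is stored inverted
  if (!rex_r) b |= 0x80;                                // R is stored inverted as well
  return b;
}

int main() {
  // e.g. vaddsd xmm1, xmm2, xmm3: pp = F2 (0x3), nds = xmm2, 128-bit form.
  std::printf("C5 %02X 58 ...\n", vex2_second_byte(false, 2, false, 0x3));
  return 0;
}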


@ -86,6 +86,7 @@ inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
#else
inline void Assembler::emit_long64(jlong x) {
*(jlong*) _code_pos = x;


@ -320,7 +320,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
__ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp);
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset()));
__ pop(tmp2);
__ pop(tmp);
__ jcc(Assembler::notEqual, call_patch);
@ -519,7 +519,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpl(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);


@ -1557,8 +1557,8 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
if (op->init_check()) {
__ cmpl(Address(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)),
__ cmpb(Address(op->klass()->as_register(),
instanceKlass::init_state_offset()),
instanceKlass::fully_initialized);
add_debug_info_for_null_check_here(op->stub()->info());
__ jcc(Assembler::notEqual, *op->stub()->entry());
@ -1730,7 +1730,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump
} else {
@ -1842,7 +1842,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(klass_RInfo, value);
// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset()));
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
@ -3289,8 +3289,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address klass_lh_addr(tmp, lh_offset);
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(klass_lh_addr, objArray_lh);
@ -3307,9 +3306,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifndef _LP64
__ movptr(tmp, dst_klass_addr);
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset()));
__ push(tmp);
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
__ push(tmp);
__ push(length);
__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@ -3333,15 +3332,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst);
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset()));
store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ load_klass(c_rarg4, dst);
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset()));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
#endif


@ -150,7 +150,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
assert_different_registers(obj, klass, len);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t


@ -1011,7 +1011,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
__ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
__ cmpb(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_path);
}
@ -1019,7 +1019,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// assert object can be fast path allocated
{
Label ok, not_ok;
__ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
__ jcc(Assembler::lessEqual, not_ok);
__ testl(obj_size, Klass::_lh_instance_slow_path_bit);
@ -1040,7 +1040,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
// get the instance size (size is postive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
@ -1052,7 +1052,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the instance size (size is postive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ eden_allocate(obj, obj_size, 0, t1, slow_path);
__ incr_allocated_bytes(thread, obj_size, 0);
@ -1119,7 +1119,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{
Label ok;
Register t0 = obj;
__ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(t0, Address(klass, Klass::layout_helper_offset()));
__ sarl(t0, Klass::_lh_array_tag_shift);
int tag = ((id == new_type_array_id)
? Klass::_lh_array_tag_type_value
@ -1153,7 +1153,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movl does right thing on 64bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
@ -1167,7 +1167,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
__ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
__ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask);
@ -1180,7 +1180,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movl does right thing on 64bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
@ -1195,7 +1195,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ incr_allocated_bytes(thread, arr_size, 0);
__ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
__ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask);
@ -1267,7 +1267,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer;
Register t = rsi;
__ load_klass(t, rax);
__ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(t, Address(t, Klass::access_flags_offset()));
__ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer);
__ ret(0);


@ -511,7 +511,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// get synchronization object
Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
@ -763,7 +763,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT
// get synchronization object
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
__ testl(rax, JVM_ACC_STATIC);
@ -1180,7 +1180,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{ Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);


@ -1160,7 +1160,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot)
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick


@ -237,9 +237,21 @@ int NativeMovRegMem::instruction_start() const {
int off = 0;
u_char instr_0 = ubyte_at(off);
// See comment in Assembler::locate_operand() about VEX prefixes.
if (instr_0 == instruction_VEX_prefix_2bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 2;
}
if (instr_0 == instruction_VEX_prefix_3bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 3;
}
// First check to see if we have a (prefixed or not) xor
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
if (instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);
}
@ -256,13 +268,13 @@ int NativeMovRegMem::instruction_start() const {
instr_0 = ubyte_at(off);
}
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
off++;
instr_0 = ubyte_at(off);
}
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);


@ -287,6 +287,9 @@ class NativeMovRegMem: public NativeInstruction {
instruction_code_xmm_store = 0x11,
instruction_code_xmm_lpd = 0x12,
instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes,
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_size = 4,
instruction_offset = 0,
data_offset = 2,


@ -53,6 +53,7 @@ REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
#endif // AMD64
REGISTER_DEFINITION(XMMRegister, xnoreg);
REGISTER_DEFINITION(XMMRegister, xmm0 );
REGISTER_DEFINITION(XMMRegister, xmm1 );
REGISTER_DEFINITION(XMMRegister, xmm2 );
@ -115,6 +116,7 @@ REGISTER_DEFINITION(Register, r12_heapbase);
REGISTER_DEFINITION(Register, r15_thread);
#endif // AMD64
REGISTER_DEFINITION(MMXRegister, mnoreg );
REGISTER_DEFINITION(MMXRegister, mmx0 );
REGISTER_DEFINITION(MMXRegister, mmx1 );
REGISTER_DEFINITION(MMXRegister, mmx2 );


@ -1374,8 +1374,7 @@ class StubGenerator: public StubCodeGenerator {
// L_success, L_failure, NULL);
assert_different_registers(sub_klass, temp);
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
// if the pointers are equal, we are done (e.g., String[] elements)
__ cmpptr(sub_klass, super_klass_addr);
@ -1787,8 +1786,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address src_klass_lh_addr(rcx_src_klass, lh_offset);
// Handle objArrays completely differently...
@ -1914,10 +1912,8 @@ class StubGenerator: public StubCodeGenerator {
// live at this point: rcx_src_klass, dst[_pos], src[_pos]
{
// Handy offsets:
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
Register rsi_dst_klass = rsi;
Register rdi_temp = rdi;
@ -2323,6 +2319,9 @@ class StubGenerator: public StubCodeGenerator {
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);
// Build this early so it's available for the interpreter
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
}
@ -2334,7 +2333,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
//------------------------------------------------------------------------------------------------------------------------
// entry points that are platform specific


@ -2261,8 +2261,7 @@ class StubGenerator: public StubCodeGenerator {
// The ckoff and ckval must be mutually consistent,
// even though caller generates both.
{ Label L;
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ cmpl(ckoff, Address(ckval, sco_offset));
__ jcc(Assembler::equal, L);
__ stop("super_check_offset inconsistent");
@ -2572,8 +2571,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
const int lh_offset = in_bytes(Klass::layout_helper_offset());
// Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
@ -2722,15 +2720,13 @@ class StubGenerator: public StubCodeGenerator {
assert_clean_int(count, sco_temp);
// Generate the type check.
const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
const int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ movl(sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);
generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
__ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
__ movl( sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);
@ -3072,6 +3068,13 @@ class StubGenerator: public StubCodeGenerator {
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
rax, rcx);
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_StackOverflowError));
}
void generate_all() {
@ -3098,12 +3101,6 @@ class StubGenerator: public StubCodeGenerator {
SharedRuntime::
throw_NullPointerException_at_call));
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_StackOverflowError));
// entry points that are platform specific
StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();


@ -522,9 +522,18 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
__ pop(rsi); // get saved bcp / (c++ prev state ).
__ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// Restore sender's sp as SP. This is necessary if the sender's
// frame is an extended compiled frame (see gen_c2i_adapter())
// and safer anyway in case of JSR292 adaptations.
__ pop(rax); // return address must be moved if SP is changed
__ mov(rsp, rsi);
__ push(rax);
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
__ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check_pop);
__ pop(rsi);
@ -552,7 +561,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT
// get synchronization object
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
@ -1012,7 +1021,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{ Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);


@ -467,8 +467,18 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
__ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check);
__ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// Restore sender's sp as SP. This is necessary if the sender's
// frame is an extended compiled frame (see gen_c2i_adapter())
// and safer anyway in case of JSR292 adaptations.
__ pop(rax); // return address must be moved if SP is changed
__ mov(rsp, r13);
__ push(rax);
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
__ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check);
@ -505,8 +515,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object
{
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
Label done;
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
@ -1006,8 +1015,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{
Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);


@ -980,7 +980,7 @@ void TemplateTable::aastore() {
__ load_klass(rbx, rax);
// Move superklass into EAX
__ load_klass(rax, rdx);
__ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
__ movptr(rax, Address(rax, objArrayKlass::element_klass_offset()));
// Compress array+index*wordSize+12 into a single register. Frees ECX.
__ lea(rdx, element_address);
@ -2033,7 +2033,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state");
__ movptr(rax, aaddress(0));
__ load_klass(rdi, rax);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer);
@ -3188,11 +3188,11 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
__ cmpb(Address(rcx, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case);
@ -3293,7 +3293,7 @@ void TemplateTable::_new() {
__ bind(initialize_header);
if (UseBiasedLocking) {
__ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),


@ -1004,8 +1004,7 @@ void TemplateTable::aastore() {
// Move superklass into rax
__ load_klass(rax, rdx);
__ movptr(rax, Address(rax,
sizeof(oopDesc) +
objArrayKlass::element_klass_offset_in_bytes()));
objArrayKlass::element_klass_offset()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ lea(rdx, element_address);
@ -2067,7 +2066,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state");
__ movptr(c_rarg1, aaddress(0));
__ load_klass(rdi, c_rarg1);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer);
@ -3235,16 +3234,15 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rsi,
instanceKlass::init_state_offset_in_bytes() +
sizeof(oopDesc)),
__ cmpb(Address(rsi,
instanceKlass::init_state_offset()),
instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx,
Address(rsi,
Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case);
@ -3337,7 +3335,7 @@ void TemplateTable::_new() {
// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
__ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
__ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),


@ -50,7 +50,7 @@ const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
static BufferBlob* stub_blob;
static const int stub_size = 400;
static const int stub_size = 550;
extern "C" {
typedef void (*getPsrInfo_stub_t)(void*);
@ -73,7 +73,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label ext_cpuid1, ext_cpuid5, done;
Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
# define __ _masm->
@ -229,14 +229,51 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
//
// Check if OS has enabled XGETBV instruction to access XCR0
// (OSXSAVE feature flag) and CPU supports AVX
//
__ andl(rcx, 0x18000000);
__ cmpl(rcx, 0x18000000);
__ jccb(Assembler::notEqual, sef_cpuid);
//
// XCR0, XFEATURE_ENABLED_MASK register
//
__ xorl(rcx, rcx); // zero for XCR0 register
__ xgetbv();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rdx);
//
// cpuid(0x7) Structured Extended Features
//
__ bind(sef_cpuid);
__ movl(rax, 7);
__ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
__ jccb(Assembler::greater, ext_cpuid);
__ xorl(rcx, rcx);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
//
// Extended cpuid(0x80000000)
//
__ bind(ext_cpuid);
__ movl(rax, 0x80000000);
__ cpuid();
__ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
__ jcc(Assembler::belowEqual, done);
__ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
__ jccb(Assembler::belowEqual, ext_cpuid1);
__ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
__ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
__ jccb(Assembler::belowEqual, ext_cpuid5);
__ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
__ jccb(Assembler::belowEqual, ext_cpuid7);
//
// Extended cpuid(0x80000008)
//
@ -248,6 +285,18 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
//
// Extended cpuid(0x80000007)
//
__ bind(ext_cpuid7);
__ movl(rax, 0x80000007);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
//
// Extended cpuid(0x80000005)
//
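For readers outside the stub generator, a hedged, standalone restatement of the AVX gate being generated above: CPUID.1:ECX must report OSXSAVE (bit 27) and AVX (bit 28), and XGETBV of XCR0 must show the OS preserving both XMM (bit 1) and YMM (bit 2) state. GCC/Clang inline asm for illustration, not the VM's code:

#include <cstdint>

static void cpuid(uint32_t leaf, uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d) {
  __asm__ __volatile__("cpuid"
                       : "=a"(a), "=b"(b), "=c"(c), "=d"(d)
                       : "a"(leaf), "c"(0u));
}

static bool os_supports_avx() {
  uint32_t a, b, c, d;
  cpuid(1, a, b, c, d);
  if ((c & 0x18000000u) != 0x18000000u)     // OSXSAVE and AVX bits, as above
    return false;
  uint32_t xcr0_lo, xcr0_hi;
  // xgetbv with ecx == 0 reads XCR0; emitted as raw bytes for old assemblers.
  __asm__ __volatile__(".byte 0x0f, 0x01, 0xd0"
                       : "=a"(xcr0_lo), "=d"(xcr0_hi)
                       : "c"(0u));
  return (xcr0_lo & 0x6u) == 0x6u;          // XMM and YMM state enabled
}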
@ -359,13 +408,19 @@ void VM_Version::get_processor_features() {
if (UseSSE < 1)
_cpuFeatures &= ~CPU_SSE;
if (UseAVX < 2)
_cpuFeatures &= ~CPU_AVX2;
if (UseAVX < 1)
_cpuFeatures &= ~CPU_AVX;
if (logical_processors_per_package() == 1) {
// HT processor could be installed on a system which doesn't support HT.
_cpuFeatures &= ~CPU_HT;
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -379,27 +434,39 @@ void VM_Version::get_processor_features() {
(supports_sse4_1() ? ", sse4.1" : ""),
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_popcnt() ? ", popcnt" : ""),
(supports_avx() ? ", avx" : ""),
(supports_avx2() ? ", avx2" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
(supports_lzcnt() ? ", lzcnt": ""),
(supports_sse4a() ? ", sse4a": ""),
(supports_ht() ? ", ht": ""));
(supports_ht() ? ", ht": ""),
(supports_tsc() ? ", tsc": ""),
(supports_tscinv_bit() ? ", tscinvbit": ""),
(supports_tscinv() ? ", tscinv": ""));
_features_str = strdup(buf);
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
// older Pentiums which do not support it.
if( UseSSE > 4 ) UseSSE=4;
if( UseSSE < 0 ) UseSSE=0;
if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
if (UseSSE > 4) UseSSE=4;
if (UseSSE < 0) UseSSE=0;
if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
UseSSE = MIN2((intx)3,UseSSE);
if( !supports_sse3() ) // Drop to 2 if no SSE3 support
if (!supports_sse3()) // Drop to 2 if no SSE3 support
UseSSE = MIN2((intx)2,UseSSE);
if( !supports_sse2() ) // Drop to 1 if no SSE2 support
if (!supports_sse2()) // Drop to 1 if no SSE2 support
UseSSE = MIN2((intx)1,UseSSE);
if( !supports_sse () ) // Drop to 0 if no SSE support
if (!supports_sse ()) // Drop to 0 if no SSE support
UseSSE = 0;
if (UseAVX > 2) UseAVX=2;
if (UseAVX < 0) UseAVX=0;
if (!supports_avx2()) // Drop to 1 if no AVX2 support
UseAVX = MIN2((intx)1,UseAVX);
if (!supports_avx ()) // Drop to 0 if no AVX support
UseAVX = 0;
// On new cpus instructions which update whole XMM register should be used
// to prevent partial register stall due to dependencies on high half.
//
@ -534,6 +601,9 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
} else if (UsePopCountInstruction) {
warning("POPCNT instruction is not available on this CPU");
FLAG_SET_DEFAULT(UsePopCountInstruction, false);
}
#ifdef COMPILER2
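The UsePopCountInstruction handling above follows a common flag-reconciliation pattern; a hedged, standalone restatement (the FLAG_* macros become plain parameters here):

#include <cstdio>

// Enable the feature by default when the CPU has it; if the user forced it
// on unsupported hardware, warn and turn it back off.
static void reconcile_popcnt(bool cpu_has_popcnt, bool flag_is_default, bool& use_popcnt) {
  if (cpu_has_popcnt) {
    if (flag_is_default) use_popcnt = true;
  } else if (use_popcnt) {
    std::fprintf(stderr, "POPCNT instruction is not available on this CPU\n");
    use_popcnt = false;
  }
}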
@ -605,7 +675,11 @@ void VM_Version::get_processor_features() {
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("UseSSE=%d",UseSSE);
tty->print("UseSSE=%d",UseSSE);
if (UseAVX > 0) {
tty->print(" UseAVX=%d",UseAVX);
}
tty->cr();
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
tty->print_cr(": no prefetching");


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,10 @@ public:
sse4_2 : 1,
: 2,
popcnt : 1,
: 8;
: 3,
osxsave : 1,
avx : 1,
: 3;
} bits;
};
@ -168,6 +171,15 @@ public:
} bits;
};
union ExtCpuid7Edx {
uint32_t value;
struct {
uint32_t : 8,
tsc_invariance : 1,
: 23;
} bits;
};
union ExtCpuid8Ecx {
uint32_t value;
struct {
@ -176,32 +188,75 @@ public:
} bits;
};
protected:
static int _cpu;
static int _model;
static int _stepping;
static int _cpuFeatures; // features returned by the "cpuid" instruction
// 0 if this instruction is not available
static const char* _features_str;
union SefCpuid7Eax {
uint32_t value;
};
enum {
CPU_CX8 = (1 << 0), // next bits are from cpuid 1 (EDX)
CPU_CMOV = (1 << 1),
CPU_FXSR = (1 << 2),
CPU_HT = (1 << 3),
CPU_MMX = (1 << 4),
CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
// may not necessarily support other 3dnow instructions
CPU_SSE = (1 << 6),
CPU_SSE2 = (1 << 7),
CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX)
CPU_SSSE3 = (1 << 9),
CPU_SSE4A = (1 << 10),
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12),
CPU_POPCNT = (1 << 13),
CPU_LZCNT = (1 << 14)
} cpuFeatureFlags;
union SefCpuid7Ebx {
uint32_t value;
struct {
uint32_t fsgsbase : 1,
: 2,
bmi1 : 1,
: 1,
avx2 : 1,
: 2,
bmi2 : 1,
: 23;
} bits;
};
union XemXcr0Eax {
uint32_t value;
struct {
uint32_t x87 : 1,
sse : 1,
ymm : 1,
: 29;
} bits;
};
protected:
static int _cpu;
static int _model;
static int _stepping;
static int _cpuFeatures; // features returned by the "cpuid" instruction
// 0 if this instruction is not available
static const char* _features_str;
enum {
CPU_CX8 = (1 << 0), // next bits are from cpuid 1 (EDX)
CPU_CMOV = (1 << 1),
CPU_FXSR = (1 << 2),
CPU_HT = (1 << 3),
CPU_MMX = (1 << 4),
CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
// may not necessarily support other 3dnow instructions
CPU_SSE = (1 << 6),
CPU_SSE2 = (1 << 7),
CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX)
CPU_SSSE3 = (1 << 9),
CPU_SSE4A = (1 << 10),
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12),
CPU_POPCNT = (1 << 13),
CPU_LZCNT = (1 << 14),
CPU_TSC = (1 << 15),
CPU_TSCINV = (1 << 16),
CPU_AVX = (1 << 17),
CPU_AVX2 = (1 << 18)
} cpuFeatureFlags;
enum {
// AMD
CPU_FAMILY_AMD_11H = 17,
// Intel
CPU_FAMILY_INTEL_CORE = 6,
CPU_MODEL_NEHALEM_EP = 26,
CPU_MODEL_WESTMERE_EP = 44,
// CPU_MODEL_IVYBRIDGE_EP = ??, TODO - get real value
CPU_MODEL_SANDYBRIDGE_EP = 45
} cpuExtendedFamily;
// cpuid information block. All info derived from executing cpuid with
// various function numbers is stored here. Intel and AMD info is
@ -228,6 +283,12 @@ protected:
uint32_t dcp_cpuid4_ecx; // unused currently
uint32_t dcp_cpuid4_edx; // unused currently
// cpuid function 7 (structured extended features)
SefCpuid7Eax sef_cpuid7_eax;
SefCpuid7Ebx sef_cpuid7_ebx;
uint32_t sef_cpuid7_ecx; // unused currently
uint32_t sef_cpuid7_edx; // unused currently
// cpuid function 0xB (processor topology)
// ecx = 0
uint32_t tpl_cpuidB0_eax;
@ -270,11 +331,21 @@ protected:
ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD)
// cpuid function 0x80000007
uint32_t ext_cpuid7_eax; // reserved
uint32_t ext_cpuid7_ebx; // reserved
uint32_t ext_cpuid7_ecx; // reserved
ExtCpuid7Edx ext_cpuid7_edx; // tscinv
// cpuid function 0x80000008
uint32_t ext_cpuid8_eax; // unused currently
uint32_t ext_cpuid8_ebx; // reserved
ExtCpuid8Ecx ext_cpuid8_ecx;
uint32_t ext_cpuid8_edx; // reserved
// extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
XemXcr0Eax xem_xcr0_eax;
uint32_t xem_xcr0_edx; // reserved
};
// The actual cpuid info block
@ -286,19 +357,23 @@ protected:
result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
return result;
}
static uint32_t extended_cpu_model() {
uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
return result;
}
static uint32_t cpu_stepping() {
uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
return result;
}
static uint logical_processor_count() {
uint result = threads_per_core();
return result;
}
static uint32_t feature_flags() {
uint32_t result = 0;
if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
@ -328,6 +403,18 @@ protected:
result |= CPU_SSE4_2;
if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
result |= CPU_POPCNT;
if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
_cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
_cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
_cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
result |= CPU_AVX;
if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
}
if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
result |= CPU_TSC;
if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
result |= CPU_TSCINV;
// AMD features.
if (is_amd()) {
@ -350,12 +437,15 @@ public:
static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
// Initialization
static void initialize();
@ -382,7 +472,6 @@ public:
//
static int cpu_family() { return _cpu;}
static bool is_P6() { return cpu_family() >= 6; }
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
@ -447,14 +536,51 @@ public:
static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
static bool supports_popcnt() { return (_cpuFeatures & CPU_POPCNT) != 0; }
//
static bool supports_avx() { return (_cpuFeatures & CPU_AVX) != 0; }
static bool supports_avx2() { return (_cpuFeatures & CPU_AVX2) != 0; }
static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
static bool is_intel_tsc_synched_at_init() {
if (is_intel_family_core()) {
uint32_t ext_model = extended_cpu_model();
if (ext_model == CPU_MODEL_NEHALEM_EP ||
ext_model == CPU_MODEL_WESTMERE_EP ||
// TODO ext_model == CPU_MODEL_IVYBRIDGE_EP ||
ext_model == CPU_MODEL_SANDYBRIDGE_EP) {
// 2-socket invtsc support. EX versions with 4 sockets are not
// guaranteed to synchronize tscs at initialization via a double
// handshake. The tscs can be explicitly set in software. Code
// that uses tsc values must be prepared for them to arbitrarily
// jump backward or forward.
return true;
}
}
return false;
}
// AMD features
//
static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; }
static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; }
static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }
static bool is_amd_Barcelona() { return is_amd() &&
extended_cpu_family() == CPU_FAMILY_AMD_11H; }
// Intel and AMD newer cores support fast timestamps well
static bool supports_tscinv_bit() {
return (_cpuFeatures & CPU_TSCINV) != 0;
}
static bool supports_tscinv() {
return supports_tscinv_bit() &&
( (is_amd() && !is_amd_Barcelona()) ||
is_intel_tsc_synched_at_init() );
}
// Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 &&
supports_sse3() && _model != 0x1C; }
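A hedged usage sketch of the new queries (VM context assumed; cycle_timer_available() and cycle_stamp() are hypothetical helpers, not existing HotSpot APIs). Raw TSC reads are only worth using where the counter is invariant and was synchronized at initialization, which is exactly what supports_tscinv() reports:

static bool cycle_timer_available() {
  return VM_Version::supports_tscinv();   // gate for rdtsc-based timers
}

static jlong cycle_stamp() {
  assert(cycle_timer_available(), "check the gate first");
  return os::rdtsc();                     // invariant TSC, synchronized at init
}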


@ -0,0 +1,777 @@
//
// Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Common Architecture Description File
source %{
// Float masks come from different places depending on platform.
#ifdef _LP64
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
static address float_signflip() { return StubRoutines::x86::float_sign_flip(); }
static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
#else
static address float_signmask() { return (address)float_signmask_pool; }
static address float_signflip() { return (address)float_signflip_pool; }
static address double_signmask() { return (address)double_signmask_pool; }
static address double_signflip() { return (address)double_signflip_pool; }
#endif
%}
// INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
instruct addF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst src));
format %{ "addss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct addF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst (LoadF src)));
format %{ "addss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct addF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst con));
format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (AddF src1 src2));
format %{ "vaddss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (AddF src1 (LoadF src2)));
format %{ "vaddss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (AddF src con));
format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct addD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst src));
format %{ "addsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct addD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst (LoadD src)));
format %{ "addsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct addD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst con));
format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (AddD src1 src2));
format %{ "vaddsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (AddD src1 (LoadD src2)));
format %{ "vaddsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (AddD src con));
format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct subF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst src));
format %{ "subss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct subF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst (LoadF src)));
format %{ "subss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct subF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst con));
format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (SubF src1 src2));
format %{ "vsubss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (SubF src1 (LoadF src2)));
format %{ "vsubss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (SubF src con));
format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct subD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst src));
format %{ "subsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct subD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst (LoadD src)));
format %{ "subsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct subD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst con));
format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (SubD src1 src2));
format %{ "vsubsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (SubD src1 (LoadD src2)));
format %{ "vsubsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (SubD src con));
format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct mulF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst src));
format %{ "mulss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct mulF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst (LoadF src)));
format %{ "mulss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct mulF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst con));
format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (MulF src1 src2));
format %{ "vmulss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (MulF src1 (LoadF src2)));
format %{ "vmulss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (MulF src con));
format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct mulD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst src));
format %{ "mulsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct mulD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst (LoadD src)));
format %{ "mulsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct mulD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst con));
format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (MulD src1 src2));
format %{ "vmulsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (MulD src1 (LoadD src2)));
format %{ "vmulsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (MulD src con));
format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct divF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst src));
format %{ "divss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct divF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst (LoadF src)));
format %{ "divss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct divF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst con));
format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (DivF src1 src2));
format %{ "vdivss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (DivF src1 (LoadF src2)));
format %{ "vdivss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (DivF src con));
format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct divD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst src));
format %{ "divsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct divD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst (LoadD src)));
format %{ "divsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct divD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst con));
format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (DivD src1 src2));
format %{ "vdivsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (DivD src1 (LoadD src2)));
format %{ "vdivsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (DivD src con));
format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct absF_reg(regF dst) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AbsF dst));
ins_cost(150);
format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
ins_encode %{
__ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct vabsF_reg(regF dst, regF src) %{
predicate(UseAVX > 0);
match(Set dst (AbsF src));
ins_cost(150);
format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
ins_encode %{
__ vandps($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(float_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct absD_reg(regD dst) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AbsD dst));
ins_cost(150);
format %{ "andpd $dst, [0x7fffffffffffffff]\t"
"# abs double by sign masking" %}
ins_encode %{
__ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct vabsD_reg(regD dst, regD src) %{
predicate(UseAVX > 0);
match(Set dst (AbsD src));
ins_cost(150);
format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
"# abs double by sign masking" %}
ins_encode %{
__ vandpd($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(double_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct negF_reg(regF dst) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (NegF dst));
ins_cost(150);
format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
ins_encode %{
__ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct vnegF_reg(regF dst, regF src) %{
predicate(UseAVX > 0);
match(Set dst (NegF src));
ins_cost(150);
format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
ins_encode %{
__ vxorps($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(float_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct negD_reg(regD dst) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (NegD dst));
ins_cost(150);
format %{ "xorpd $dst, [0x8000000000000000]\t"
"# neg double by sign flipping" %}
ins_encode %{
__ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct vnegD_reg(regD dst, regD src) %{
predicate(UseAVX > 0);
match(Set dst (NegD src));
ins_cost(150);
format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
"# neg double by sign flipping" %}
ins_encode %{
__ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(double_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_reg(regF dst, regF src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_mem(regF dst, memory src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_imm(regF dst, immF con) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_reg(regD dst, regD src) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD src));
format %{ "sqrtsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_mem(regD dst, memory src) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD (LoadD src)));
format %{ "sqrtsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_imm(regD dst, immD con) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD con));
format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -2835,7 +2835,7 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
#endif
}
void os::free_memory(char *addr, size_t bytes) {
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
::madvise(addr, bytes, MADV_DONTNEED);
}


@ -2546,8 +2546,8 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
}
void os::free_memory(char *addr, size_t bytes) {
commit_memory(addr, bytes, false);
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
commit_memory(addr, bytes, alignment_hint, false);
}
void os::numa_make_global(char *addr, size_t bytes) {


@ -59,6 +59,10 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}
int os::get_last_error() {
return errno;
}
bool os::is_debugger_attached() {
// not implemented
return false;


@ -2821,7 +2821,7 @@ bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
}
// Uncommit the pages in a specified region.
void os::free_memory(char* addr, size_t bytes) {
void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
if (madvise(addr, bytes, MADV_FREE) < 0) {
debug_only(warning("MADV_FREE failed."));
return;


@ -132,7 +132,6 @@ PVOID topLevelVectoredExceptionHandler = NULL;
// save DLL module handle, used by GetModuleFileName
HINSTANCE vm_lib_handle;
static int getLastErrorString(char *buf, size_t len);
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
switch (reason) {
@ -1452,7 +1451,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen)
return result;
}
long errcode = GetLastError();
DWORD errcode = GetLastError();
if (errcode == ERROR_MOD_NOT_FOUND) {
strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
ebuf[ebuflen-1]='\0';
@ -1463,11 +1462,11 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen)
// If we can read dll-info and find that dll was built
// for an architecture other than the one HotSpot is running in
// - then print to buffer "DLL was built for a different architecture"
// else call getLastErrorString to obtain system error message
// else call os::lasterror to obtain system error message
// Read system error message into ebuf
// It may or may not be overwritten below (in the for loop and just above)
getLastErrorString(ebuf, (size_t) ebuflen);
lasterror(ebuf, (size_t) ebuflen);
ebuf[ebuflen-1]='\0';
int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
if (file_descriptor<0)
@ -1500,7 +1499,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen)
::close(file_descriptor);
if (failed_to_get_lib_arch)
{
// file i/o error - report getLastErrorString(...) msg
// file i/o error - report os::lasterror(...) msg
return NULL;
}
@ -1543,7 +1542,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen)
"Didn't find runing architecture code in arch_array");
// If the architecture is right
// but some other error took place - report getLastErrorString(...) msg
// but some other error took place - report os::lasterror(...) msg
if (lib_arch == running_arch)
{
return NULL;
@ -1775,12 +1774,12 @@ void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
size_t os::lasterror(char *buf, size_t len) {
long errval;
size_t os::lasterror(char* buf, size_t len) {
DWORD errval;
if ((errval = GetLastError()) != 0) {
/* DOS error */
int n = (int)FormatMessage(
// DOS error
size_t n = (size_t)FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
errval,
@ -1789,7 +1788,7 @@ size_t os::lasterror(char *buf, size_t len) {
(DWORD)len,
NULL);
if (n > 3) {
/* Drop final '.', CR, LF */
// Drop final '.', CR, LF
if (buf[n - 1] == '\n') n--;
if (buf[n - 1] == '\r') n--;
if (buf[n - 1] == '.') n--;
@ -1799,17 +1798,25 @@ size_t os::lasterror(char *buf, size_t len) {
}
if (errno != 0) {
/* C runtime error that has no corresponding DOS error code */
const char *s = strerror(errno);
// C runtime error that has no corresponding DOS error code
const char* s = strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
buf[n] = '\0';
return n;
}
return 0;
}
int os::get_last_error() {
DWORD error = GetLastError();
if (error == 0)
error = errno;
return (int)error;
}
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
@ -3130,7 +3137,7 @@ bool os::unguard_memory(char* addr, size_t bytes) {
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::free_memory(char *addr, size_t bytes) { }
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
@ -4746,7 +4753,7 @@ bool os::check_heap(bool force) {
fatal("corrupted C heap");
}
}
int err = GetLastError();
DWORD err = GetLastError();
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
fatal(err_msg("heap walk aborted with error %d", err));
}
@ -4778,45 +4785,6 @@ LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
return EXCEPTION_CONTINUE_SEARCH;
}
static int getLastErrorString(char *buf, size_t len)
{
long errval;
if ((errval = GetLastError()) != 0)
{
/* DOS error */
size_t n = (size_t)FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
errval,
0,
buf,
(DWORD)len,
NULL);
if (n > 3) {
/* Drop final '.', CR, LF */
if (buf[n - 1] == '\n') n--;
if (buf[n - 1] == '\r') n--;
if (buf[n - 1] == '.') n--;
buf[n] = '\0';
}
return (int)n;
}
if (errno != 0)
{
/* C runtime error that has no corresponding DOS error code */
const char *s = strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
buf[n] = '\0';
return (int)n;
}
return 0;
}
// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }
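A hedged, standalone condensation of the GetLastError()/FormatMessage pattern that os::lasterror() now owns (the Win32 calls are the documented system APIs; the wrapper itself is illustrative):

#include <windows.h>

static size_t last_error_string(char* buf, size_t len) {
  DWORD errval = GetLastError();
  if (errval == 0 || len == 0) return 0;
  DWORD n = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                           NULL, errval, 0, buf, (DWORD)len, NULL);
  while (n > 0 && (buf[n - 1] == '\n' || buf[n - 1] == '\r' || buf[n - 1] == '.')) {
    buf[--n] = '\0';               // drop trailing '.', CR, LF
  }
  return (size_t)n;
}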


@ -28,6 +28,8 @@
static void setup_fpu();
static bool supports_sse();
static jlong rdtsc();
static bool is_allocatable(size_t bytes);
// Used to register dynamic code cache area with the OS


@ -0,0 +1,46 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_X86_VM_OS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_OS_BSD_X86_INLINE_HPP
#include "runtime/os.hpp"
// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
inline jlong os::rdtsc() {
#ifndef AMD64
// 64 bit result in edx:eax
uint64_t res;
__asm__ __volatile__ ("rdtsc" : "=A" (res));
return (jlong)res;
#else
uint64_t res;
uint32_t ts1, ts2;
__asm__ __volatile__ ("rdtsc" : "=a" (ts1), "=d" (ts2));
res = ((uint64_t)ts1 | (uint64_t)ts2 << 32);
return (jlong)res;
#endif // AMD64
}
#endif // OS_CPU_BSD_X86_VM_OS_BSD_X86_INLINE_HPP
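
As an aside for readers unfamiliar with rdtsc: the sketch below (plain C++ with GCC-style inline assembly, outside HotSpot, with made-up names) shows how the two 32-bit halves returned in eax/edx combine into the 64-bit cycle count used above, and how such a counter might be read around a piece of work.

#include <cstdint>
#include <cstdio>

// Minimal sketch: read the time stamp counter and combine the halves
// the same way the AMD64 branch above does.
static inline uint64_t read_tsc_sketch() {
  uint32_t lo, hi;
  __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
  return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main() {
  uint64_t start = read_tsc_sketch();
  volatile long sink = 0;
  for (long i = 0; i < 1000000; i++) sink = sink + i;  // work being "timed"
  uint64_t cycles = read_tsc_sketch() - start;
  // rdtsc is not a serializing instruction, so treat the count as approximate.
  std::printf("approx. elapsed cycles: %llu\n", (unsigned long long)cycles);
  return 0;
}
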

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
static void setup_fpu();
static bool supports_sse();
static jlong rdtsc();
static bool is_allocatable(size_t bytes);
// Used to register dynamic code cache area with the OS

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_X86_VM_OS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_OS_LINUX_X86_INLINE_HPP
#include "runtime/os.hpp"
// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
inline jlong os::rdtsc() {
#ifndef AMD64
// 64 bit result in edx:eax
uint64_t res;
__asm__ __volatile__ ("rdtsc" : "=A" (res));
return (jlong)res;
#else
uint64_t res;
uint32_t ts1, ts2;
__asm__ __volatile__ ("rdtsc" : "=a" (ts1), "=d" (ts2));
res = ((uint64_t)ts1 | (uint64_t)ts2 << 32);
return (jlong)res;
#endif // AMD64
}
#endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_INLINE_HPP

View File

@ -46,6 +46,8 @@
static bool supports_sse();
static jlong rdtsc();
static bool is_allocatable(size_t bytes);
// Used to register dynamic code cache area with the OS

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_SOLARIS_X86_VM_OS_SOLARIS_X86_INLINE_HPP
#define OS_CPU_SOLARIS_X86_VM_OS_SOLARIS_X86_INLINE_HPP
#include "runtime/os.hpp"
inline jlong os::rdtsc() { return _raw_rdtsc(); }
#endif // OS_CPU_SOLARIS_X86_VM_OS_SOLARIS_X86_INLINE_HPP

View File

@ -43,6 +43,11 @@
movl %ebp, %eax
.end
// Support for os::rdtsc()
.inline _raw_rdtsc,0
rdtsc
.end
// Support for jint Atomic::add(jint inc, volatile jint* dest)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_add,3
@ -113,7 +118,6 @@
fistpll (%eax)
.end
// Support for OrderAccess::acquire()
.inline _OrderAccess_acquire,0
movl 0(%esp), %eax

View File

@ -30,12 +30,19 @@
movq %fs:0, %rax
.end
// Get the frame pointer from current frame.
// Get current fp
.inline _get_current_fp,0
.volatile
movq %rbp, %rax
.end
// Support for os::rdtsc()
.inline _raw_rdtsc,0
rdtsc
salq $32, %rdx
orq %rdx, %rax
.end
// Support for jint Atomic::add(jint add_value, volatile jint* dest)
.inline _Atomic_add,2
movl %edi, %eax // save add_value for return

View File

@ -58,6 +58,8 @@
static void setup_fpu();
static bool supports_sse() { return true; }
static jlong rdtsc();
static bool register_code_area(char *low, char *high);
#endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_INLINE_HPP
#include "runtime/os.hpp"
inline jlong os::rdtsc() {
// 32 bit: 64 bit result in edx:eax
// 64 bit: 64 bit value in rax
uint64_t res;
res = (uint64_t)__rdtsc();
return (jlong)res;
}
#endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_INLINE_HPP

View File

@ -627,6 +627,7 @@ bool InstructForm::is_wide_memory_kill(FormDict &globals) const {
if( strcmp(_matrule->_opType,"MemBarAcquire") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true;
return false;
}
@ -3978,7 +3979,8 @@ bool MatchRule::is_ideal_membar() const {
!strcmp(_opType,"MemBarAcquireLock") ||
!strcmp(_opType,"MemBarReleaseLock") ||
!strcmp(_opType,"MemBarVolatile" ) ||
!strcmp(_opType,"MemBarCPUOrder" ) ;
!strcmp(_opType,"MemBarCPUOrder" ) ||
!strcmp(_opType,"MemBarStoreStore" );
}
bool MatchRule::is_ideal_loadPC() const {

View File

@ -61,6 +61,7 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
_code_limit = cs->limit();
_code_pos = cs->end();
_oop_recorder= code->oop_recorder();
DEBUG_ONLY( _short_branch_delta = 0; )
if (_code_begin == NULL) {
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));

View File

@ -241,6 +241,33 @@ class AbstractAssembler : public ResourceObj {
// Make it return true on platforms which need to verify
// instruction boundaries for some operations.
inline static bool pd_check_instruction_mark();
// Add delta to short branch distance to verify that it still fits into imm8.
int _short_branch_delta;
int short_branch_delta() const { return _short_branch_delta; }
void set_short_branch_delta() { _short_branch_delta = 32; }
void clear_short_branch_delta() { _short_branch_delta = 0; }
class ShortBranchVerifier: public StackObj {
private:
AbstractAssembler* _assm;
public:
ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
assert(assm->short_branch_delta() == 0, "overlapping instructions");
_assm->set_short_branch_delta();
}
~ShortBranchVerifier() {
_assm->clear_short_branch_delta();
}
};
#else
// Dummy in product.
class ShortBranchVerifier: public StackObj {
public:
ShortBranchVerifier(AbstractAssembler* assm) {}
};
#endif
// Label functions
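
The verifier above is a StackObj guard: its constructor and destructor bracket a single emit routine. A minimal standalone sketch of the same RAII idea, using toy class and function names that are not HotSpot's:

#include <cassert>

// Toy stand-ins; the real AbstractAssembler/ShortBranchVerifier live in HotSpot.
struct ToyAssembler {
  int _short_branch_delta;
  ToyAssembler() : _short_branch_delta(0) {}
  int  short_branch_delta() const { return _short_branch_delta; }
  void set_short_branch_delta()   { _short_branch_delta = 32; }
  void clear_short_branch_delta() { _short_branch_delta = 0; }
};

class ToyShortBranchVerifier {
  ToyAssembler* _assm;
 public:
  explicit ToyShortBranchVerifier(ToyAssembler* assm) : _assm(assm) {
    // A second live verifier would mean two emit routines overlap.
    assert(assm->short_branch_delta() == 0 && "overlapping instructions");
    _assm->set_short_branch_delta();
  }
  ~ToyShortBranchVerifier() { _assm->clear_short_branch_delta(); }
};

// Hypothetical emit routine: the guard covers the whole scope, and the delta
// it sets widens the imm8 range check while the routine runs.
void emit_some_instruction(ToyAssembler* assm) {
  ToyShortBranchVerifier sbv(assm);
  // ... encode instruction bytes here ...
}                                      // delta cleared on scope exit

int main() {
  ToyAssembler a;
  emit_some_instruction(&a);
  return 0;
}
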

View File

@ -854,6 +854,9 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
if (opTypeCheck->_info_for_exception) do_info(opTypeCheck->_info_for_exception);
if (opTypeCheck->_info_for_patch) do_info(opTypeCheck->_info_for_patch);
if (opTypeCheck->_object->is_valid()) do_input(opTypeCheck->_object);
if (op->code() == lir_store_check && opTypeCheck->_object->is_valid()) {
do_temp(opTypeCheck->_object);
}
if (opTypeCheck->_array->is_valid()) do_input(opTypeCheck->_array);
if (opTypeCheck->_tmp1->is_valid()) do_temp(opTypeCheck->_tmp1);
if (opTypeCheck->_tmp2->is_valid()) do_temp(opTypeCheck->_tmp2);

View File

@ -1256,8 +1256,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
info = state_for(x);
}
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
__ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
__ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}

View File

@ -122,18 +122,32 @@ void CE_Eliminator::block_do(BlockBegin* block) {
if (sux != f_goto->default_sux()) return;
// check if at least one word was pushed on sux_state
// inlining depths must match
ValueStack* if_state = if_->state();
ValueStack* sux_state = sux->state();
if (sux_state->stack_size() <= if_->state()->stack_size()) return;
if (if_state->scope()->level() > sux_state->scope()->level()) {
while (sux_state->scope() != if_state->scope()) {
if_state = if_state->caller_state();
assert(if_state != NULL, "states do not match up");
}
} else if (if_state->scope()->level() < sux_state->scope()->level()) {
while (sux_state->scope() != if_state->scope()) {
sux_state = sux_state->caller_state();
assert(sux_state != NULL, "states do not match up");
}
}
if (sux_state->stack_size() <= if_state->stack_size()) return;
// check if phi function is present at end of successor stack and that
// only this phi was pushed on the stack
Value sux_phi = sux_state->stack_at(if_->state()->stack_size());
Value sux_phi = sux_state->stack_at(if_state->stack_size());
if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_->state()->stack_size()) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_state->stack_size()) return;
// get the values that were pushed in the true- and false-branch
Value t_value = t_goto->state()->stack_at(if_->state()->stack_size());
Value f_value = f_goto->state()->stack_at(if_->state()->stack_size());
Value t_value = t_goto->state()->stack_at(if_state->stack_size());
Value f_value = f_goto->state()->stack_at(if_state->stack_size());
// backend does not support floats
assert(t_value->type()->base() == f_value->type()->base(), "incompatible types");
@ -180,11 +194,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
ValueStack* goto_state = if_->state();
while (sux_state->scope() != goto_state->scope()) {
goto_state = goto_state->caller_state();
assert(goto_state != NULL, "states do not match up");
}
ValueStack* goto_state = if_state;
goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
goto_state->push(result->type(), result);
assert(goto_state->is_same(sux_state), "states must match now");
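
The new code above walks the deeper of the two states up its caller chain until both sit in the same scope. A standalone sketch of that step, simplified to compare nesting levels and using toy types rather than HotSpot's ValueStack/IRScope:

#include <cassert>

struct ToyScope { int level; };
struct ToyState {
  ToyScope* scope;
  ToyState* caller;          // state of the calling method, or nullptr
};

// Walk the deeper state up its caller chain until both states sit at the
// same nesting level (the real code compares scope objects directly).
static void match_scopes(ToyState*& a, ToyState*& b) {
  while (a->scope->level > b->scope->level) { a = a->caller; assert(a != nullptr); }
  while (b->scope->level > a->scope->level) { b = b->caller; assert(b != nullptr); }
}

int main() {
  ToyScope outer = {1};
  ToyScope inner = {2};
  ToyState caller_state = {&outer, nullptr};
  ToyState callee_state = {&inner, &caller_state};

  ToyState* if_state  = &callee_state;   // deeper (inlined) state
  ToyState* sux_state = &caller_state;
  match_scopes(if_state, sux_state);     // if_state is walked up one level
  return (if_state->scope == sux_state->scope) ? 0 : 1;
}
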

View File

@ -54,7 +54,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
_flags = ciFlags(access_flags);
_has_finalizer = access_flags.has_finalizer();
_has_subklass = ik->subklass() != NULL;
_init_state = (instanceKlass::ClassState)ik->get_init_state();
_init_state = ik->init_state();
_nonstatic_field_size = ik->nonstatic_field_size();
_has_nonstatic_fields = ik->has_nonstatic_fields();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
@ -118,7 +118,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
void ciInstanceKlass::compute_shared_init_state() {
GUARDED_VM_ENTRY(
instanceKlass* ik = get_instanceKlass();
_init_state = (instanceKlass::ClassState)ik->get_init_state();
_init_state = ik->init_state();
)
}

View File

@ -1589,7 +1589,7 @@ ciTypeFlow::Block::Block(ciTypeFlow* outer,
_next = NULL;
_on_work_list = false;
_backedge_copy = false;
_exception_entry = false;
_has_monitorenter = false;
_trap_bci = -1;
_trap_index = 0;
df_init();
@ -2182,6 +2182,10 @@ bool ciTypeFlow::clone_loop_heads(Loop* lp, StateVector* temp_vector, JsrSet* te
!head->is_clonable_exit(lp))
continue;
// Avoid BoxLock merge.
if (EliminateNestedLocks && head->has_monitorenter())
continue;
// check not already cloned
if (head->backedge_copy_count() != 0)
continue;
@ -2322,6 +2326,10 @@ void ciTypeFlow::flow_block(ciTypeFlow::Block* block,
// Watch for bailouts.
if (failing()) return;
if (str.cur_bc() == Bytecodes::_monitorenter) {
block->set_has_monitorenter();
}
if (res) {
// We have encountered a trap. Record it in this block.

View File

@ -544,15 +544,19 @@ public:
// Has this block been cloned for a loop backedge?
bool _backedge_copy;
// This block is entry to irreducible loop.
bool _irreducible_entry;
// This block has monitor entry point.
bool _has_monitorenter;
// A pointer used for our internal work list
Block* _next;
bool _on_work_list; // on the work list
Block* _next;
Block* _rpo_next; // Reverse post order list
// Loop info
Loop* _loop; // nearest loop
bool _irreducible_entry; // entry to irreducible loop
bool _exception_entry; // entry to exception handler
ciBlock* ciblock() const { return _ciblock; }
StateVector* state() const { return _state; }
@ -689,6 +693,8 @@ public:
bool is_loop_head() const { return _loop && _loop->head() == this; }
void set_irreducible_entry(bool c) { _irreducible_entry = c; }
bool is_irreducible_entry() const { return _irreducible_entry; }
void set_has_monitorenter() { _has_monitorenter = true; }
bool has_monitorenter() const { return _has_monitorenter; }
bool is_visited() const { return has_pre_order(); }
bool is_post_visited() const { return has_post_order(); }
bool is_clonable_exit(Loop* lp);

View File

@ -45,6 +45,7 @@
#include "oops/methodOop.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.hpp"
#include "runtime/reflection.hpp"
@ -1050,7 +1051,7 @@ static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) {
class FieldAllocationCount: public ResourceObj {
public:
unsigned int count[MAX_FIELD_ALLOCATION_TYPE];
u2 count[MAX_FIELD_ALLOCATION_TYPE];
FieldAllocationCount() {
for (int i = 0; i < MAX_FIELD_ALLOCATION_TYPE; i++) {
@ -1060,6 +1061,8 @@ class FieldAllocationCount: public ResourceObj {
FieldAllocationType update(bool is_static, BasicType type) {
FieldAllocationType atype = basic_type_to_atype(is_static, type);
// Make sure there is no overflow with injected fields.
assert(count[atype] < 0xFFFF, "More than 65535 fields");
count[atype]++;
return atype;
}
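
To make the motivation for the new assert concrete, a standalone sketch follows, with uint16_t standing in for HotSpot's u2 and made-up names; it shows how an unchecked 16-bit counter wraps silently while the checked version fails loudly:

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint16_t u2_sketch;

static u2_sketch checked_increment(u2_sketch count) {
  // Mirrors the guard above: fail loudly instead of wrapping past 65535.
  assert(count < 0xFFFF && "More than 65535 fields");
  return (u2_sketch)(count + 1);
}

int main() {
  u2_sketch unchecked = 0;
  for (int i = 0; i < 65536; i++) unchecked++;   // silently wraps back to 0
  std::printf("unchecked after 65536 increments: %u\n", unchecked);

  u2_sketch checked = 0;
  for (int i = 0; i < 1000; i++) checked = checked_increment(checked);
  std::printf("checked after 1000 increments: %u\n", checked);
  return 0;
}
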
@ -1070,7 +1073,7 @@ typeArrayHandle ClassFileParser::parse_fields(Symbol* class_name,
constantPoolHandle cp, bool is_interface,
FieldAllocationCount *fac,
objArrayHandle* fields_annotations,
int* java_fields_count_ptr, TRAPS) {
u2* java_fields_count_ptr, TRAPS) {
ClassFileStream* cfs = stream();
typeArrayHandle nullHandle;
cfs->guarantee_more(2, CHECK_(nullHandle)); // length
@ -2639,8 +2642,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
TempNewSymbol& parsed_name,
bool verify,
TRAPS) {
// So that JVMTI can cache class file in the state before retransformable agents
// have modified it
// When a retransformable agent is attached, JVMTI caches the
// class bytes that existed before the first retransformation.
// If RedefineClasses() was used before the retransformable
// agent attached, then the cached class bytes may not be the
// original class bytes.
unsigned char *cached_class_file_bytes = NULL;
jint cached_class_file_length;
@ -2660,6 +2666,25 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
_max_bootstrap_specifier_index = -1;
if (JvmtiExport::should_post_class_file_load_hook()) {
// Get the cached class file bytes (if any) from the class that
// is being redefined or retransformed. We use jvmti_thread_state()
// instead of JvmtiThreadState::state_for(jt) so we don't allocate
// a JvmtiThreadState any earlier than necessary. This will help
// avoid the bug described by 7126851.
JvmtiThreadState *state = jt->jvmti_thread_state();
if (state != NULL) {
KlassHandle *h_class_being_redefined =
state->get_class_being_redefined();
if (h_class_being_redefined != NULL) {
instanceKlassHandle ikh_class_being_redefined =
instanceKlassHandle(THREAD, (*h_class_being_redefined)());
cached_class_file_bytes =
ikh_class_being_redefined->get_cached_class_file_bytes();
cached_class_file_length =
ikh_class_being_redefined->get_cached_class_file_len();
}
}
unsigned char* ptr = cfs->buffer();
unsigned char* end_ptr = cfs->buffer() + cfs->length();
@ -2843,7 +2868,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle));
}
int java_fields_count = 0;
u2 java_fields_count = 0;
// Fields (offsets are filled in later)
FieldAllocationCount fac;
objArrayHandle fields_annotations;

View File

@ -91,7 +91,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
constantPoolHandle cp, bool is_interface,
FieldAllocationCount *fac,
objArrayHandle* fields_annotations,
int* java_fields_count_ptr, TRAPS);
u2* java_fields_count_ptr, TRAPS);
// Method parsing
methodHandle parse_method(constantPoolHandle cp, bool is_interface,

View File

@ -296,6 +296,7 @@
template(finalize_method_name, "finalize") \
template(reference_lock_name, "lock") \
template(reference_discovered_name, "discovered") \
template(run_finalization_name, "runFinalization") \
template(run_finalizers_on_exit_name, "runFinalizersOnExit") \
template(uncaughtException_name, "uncaughtException") \
template(dispatchUncaughtException_name, "dispatchUncaughtException") \

View File

@ -2598,7 +2598,7 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
_cfls(cfls)
@ -2732,7 +2732,7 @@ void CFLS_LAB::retire(int tid) {
// Update globals stats for num_blocks used
_global_num_blocks[i] += (_num_blocks[i] - num_retire);
_global_num_workers[i]++;
assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
if (num_retire > 0) {
_cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
// Reset this list.

View File

@ -631,7 +631,7 @@ class CFLS_LAB : public CHeapObj {
static AdaptiveWeightedAverage
_blocks_to_claim [CompactibleFreeListSpace::IndexSetSize];
static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
static int _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
static uint _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
// Internal work method

View File

@ -3779,7 +3779,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
terminator()->reset_for_reuse(active_workers);
}
void work(int i);
void work(uint worker_id);
bool should_yield() {
return ConcurrentMarkSweepThread::should_yield()
&& !_collector->foregroundGCIsActive()
@ -3852,7 +3852,7 @@ void CMSConcMarkingTerminator::yield() {
// . if neither is available, offer termination
// -- Terminate and return result
//
void CMSConcMarkingTask::work(int i) {
void CMSConcMarkingTask::work(uint worker_id) {
elapsedTimer _timer;
ResourceMark rm;
HandleMark hm;
@ -3860,37 +3860,40 @@ void CMSConcMarkingTask::work(int i) {
DEBUG_ONLY(_collector->verify_overflow_empty();)
// Before we begin work, our work queue should be empty
assert(work_queue(i)->size() == 0, "Expected to be empty");
assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
// Scan the bitmap covering _cms_space, tracing through grey objects.
_timer.start();
do_scan_and_mark(i, _cms_space);
do_scan_and_mark(worker_id, _cms_space);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
worker_id, _timer.seconds());
// XXX: need xxx/xxx type of notation, two timers
}
// ... do the same for the _perm_space
_timer.reset();
_timer.start();
do_scan_and_mark(i, _perm_space);
do_scan_and_mark(worker_id, _perm_space);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
worker_id, _timer.seconds());
// XXX: need xxx/xxx type of notation, two timers
}
// ... do work stealing
_timer.reset();
_timer.start();
do_work_steal(i);
do_work_steal(worker_id);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
worker_id, _timer.seconds());
// XXX: need xxx/xxx type of notation, two timers
}
assert(_collector->_markStack.isEmpty(), "Should have been emptied");
assert(work_queue(i)->size() == 0, "Should have been emptied");
assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
// Note that under the current task protocol, the
// following assertion is true even of the spaces
// expanded since the completion of the concurrent
@ -3946,7 +3949,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
// We allow that there may be no tasks to do here because
// we are restarting after a stack overflow.
assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
int nth_task = 0;
uint nth_task = 0;
HeapWord* aligned_start = sp->bottom();
if (sp->used_region().contains(_restart_addr)) {
@ -5075,7 +5078,7 @@ class CMSParRemarkTask: public AbstractGangTask {
ParallelTaskTerminator* terminator() { return &_term; }
int n_workers() { return _n_workers; }
void work(int i);
void work(uint worker_id);
private:
// Work method in support of parallel rescan ... of young gen spaces
@ -5096,7 +5099,7 @@ class CMSParRemarkTask: public AbstractGangTask {
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.
void CMSParRemarkTask::work(int i) {
void CMSParRemarkTask::work(uint worker_id) {
elapsedTimer _timer;
ResourceMark rm;
HandleMark hm;
@ -5107,7 +5110,7 @@ void CMSParRemarkTask::work(int i) {
Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
_collector->_span, _collector->ref_processor(),
&(_collector->_markBitMap),
work_queue(i), &(_collector->_revisitStack));
work_queue(worker_id), &(_collector->_revisitStack));
// Rescan young gen roots first since these are likely
// coarsely partitioned and may, on that account, constitute
@ -5128,15 +5131,15 @@ void CMSParRemarkTask::work(int i) {
assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished young gen rescan work in %dth thread: %3.3f sec",
i, _timer.seconds());
worker_id, _timer.seconds());
}
}
@ -5158,7 +5161,7 @@ void CMSParRemarkTask::work(int i) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished remaining root rescan work in %dth thread: %3.3f sec",
i, _timer.seconds());
worker_id, _timer.seconds());
}
// ---------- rescan dirty cards ------------
@ -5167,26 +5170,26 @@ void CMSParRemarkTask::work(int i) {
// Do the rescan tasks for each of the two spaces
// (cms_space and perm_space) in turn.
// "i" is passed to select the "i-th" task_queue
do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
// "worker_id" is passed to select the task_queue for "worker_id"
do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished dirty card rescan work in %dth thread: %3.3f sec",
i, _timer.seconds());
worker_id, _timer.seconds());
}
// ---------- steal work from other threads ...
// ---------- ... and drain overflow list.
_timer.reset();
_timer.start();
do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
"Finished work stealing in %dth thread: %3.3f sec",
i, _timer.seconds());
worker_id, _timer.seconds());
}
}
@ -5207,8 +5210,8 @@ CMSParRemarkTask::do_young_space_rescan(int i,
SequentialSubTasksDone* pst = space->par_seq_tasks();
assert(pst->valid(), "Uninitialized use?");
int nth_task = 0;
int n_tasks = pst->n_tasks();
uint nth_task = 0;
uint n_tasks = pst->n_tasks();
HeapWord *start, *end;
while (!pst->is_task_claimed(/* reference */ nth_task)) {
@ -5220,12 +5223,12 @@ CMSParRemarkTask::do_young_space_rescan(int i,
} else if (nth_task == 0) {
start = space->bottom();
end = chunk_array[nth_task];
} else if (nth_task < (jint)chunk_top) {
} else if (nth_task < (uint)chunk_top) {
assert(nth_task >= 1, "Control point invariant");
start = chunk_array[nth_task - 1];
end = chunk_array[nth_task];
} else {
assert(nth_task == (jint)chunk_top, "Control point invariant");
assert(nth_task == (uint)chunk_top, "Control point invariant");
start = chunk_array[chunk_top - 1];
end = space->top();
}
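
Taken on its own, the start/end selection above splits the space into per-task chunks. A standalone sketch of the visible branches, with made-up boundary values in place of the real survivor/eden chunk arrays:

#include <cstdio>

int main() {
  // Made-up boundaries standing in for chunk_array/chunk_top above.
  const unsigned chunk_array[] = {10, 25, 40};
  const unsigned chunk_top = 3;              // number of recorded boundaries
  const unsigned bottom = 0, top = 57;       // current space bounds

  for (unsigned nth_task = 0; nth_task <= chunk_top; nth_task++) {
    unsigned start, end;
    if (nth_task == 0) {
      start = bottom;
      end   = chunk_array[0];
    } else if (nth_task < chunk_top) {
      start = chunk_array[nth_task - 1];
      end   = chunk_array[nth_task];
    } else {
      start = chunk_array[chunk_top - 1];
      end   = top;
    }
    std::printf("task %u rescans [%u, %u)\n", nth_task, start, end);
  }
  return 0;
}
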
@ -5288,7 +5291,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
assert(pst->valid(), "Uninitialized use?");
int nth_task = 0;
uint nth_task = 0;
const int alignment = CardTableModRefBS::card_size * BitsPerWord;
MemRegion span = sp->used_region();
HeapWord* start_addr = span.start();
@ -5736,26 +5739,26 @@ public:
CMSParKeepAliveClosure* keep_alive,
int* seed);
virtual void work(int i);
virtual void work(uint worker_id);
};
void CMSRefProcTaskProxy::work(int i) {
void CMSRefProcTaskProxy::work(uint worker_id) {
assert(_collector->_span.equals(_span), "Inconsistency in _span");
CMSParKeepAliveClosure par_keep_alive(_collector, _span,
_mark_bit_map,
&_collector->_revisitStack,
work_queue(i));
work_queue(worker_id));
CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
_mark_bit_map,
&_collector->_revisitStack,
work_queue(i));
work_queue(worker_id));
CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
_task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
_task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
if (_task.marks_oops_alive()) {
do_work_steal(i, &par_drain_stack, &par_keep_alive,
_collector->hash_seed(i));
do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
_collector->hash_seed(worker_id));
}
assert(work_queue(i)->size() == 0, "work_queue should be empty");
assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}
@ -5769,9 +5772,9 @@ public:
_task(task)
{ }
virtual void work(int i)
virtual void work(uint worker_id)
{
_task.work(i);
_task.work(worker_id);
}
};

View File

@ -264,7 +264,7 @@ prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
// or some improperly initialized variable which leads to no
// active threads, protect against that in a product build.
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1);
1U);
}
size_t max_waste = n_threads * chunkSize;
// it should be aligned with respect to chunkSize

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
@ -183,12 +184,11 @@ CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
void CMMarkStack::allocate(size_t size) {
_base = NEW_C_HEAP_ARRAY(oop, size);
if (_base == NULL) {
vm_exit_during_initialization("Failed to allocate "
"CM region mark stack");
vm_exit_during_initialization("Failed to allocate CM region mark stack");
}
_index = 0;
_capacity = (jint) size;
_oops_do_bound = -1;
_saved_index = -1;
NOT_PRODUCT(_max_depth = 0);
}
@ -283,7 +283,6 @@ bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
}
}
CMRegionStack::CMRegionStack() : _base(NULL) {}
void CMRegionStack::allocate(size_t size) {
@ -302,6 +301,8 @@ CMRegionStack::~CMRegionStack() {
}
void CMRegionStack::push_lock_free(MemRegion mr) {
guarantee(false, "push_lock_free(): don't call this any more");
assert(mr.word_size() > 0, "Precondition");
while (true) {
jint index = _index;
@ -325,6 +326,8 @@ void CMRegionStack::push_lock_free(MemRegion mr) {
// marking / remark phases. Should only be called in tandem with
// other lock-free pops.
MemRegion CMRegionStack::pop_lock_free() {
guarantee(false, "pop_lock_free(): don't call this any more");
while (true) {
jint index = _index;
@ -390,6 +393,8 @@ MemRegion CMRegionStack::pop_with_lock() {
#endif
bool CMRegionStack::invalidate_entries_into_cset() {
guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
bool result = false;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
for (int i = 0; i < _oops_do_bound; ++i) {
@ -438,14 +443,29 @@ bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
return res;
}
void CMMarkStack::note_start_of_gc() {
assert(_saved_index == -1,
"note_start_of_gc()/end_of_gc() bracketed incorrectly");
_saved_index = _index;
}
void CMMarkStack::note_end_of_gc() {
// This is intentionally a guarantee, instead of an assert. If we
// accidentally add something to the mark stack during GC, it
// will be a correctness issue so it's better if we crash. we'll
// only check this once per GC anyway, so it won't be a performance
// issue in any way.
guarantee(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
_saved_index = -1;
}
void CMMarkStack::oops_do(OopClosure* f) {
if (_index == 0) return;
assert(_oops_do_bound != -1 && _oops_do_bound <= _index,
"Bound must be set.");
for (int i = 0; i < _oops_do_bound; i++) {
assert(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
for (int i = 0; i < _index; i += 1) {
f->do_oop(&_base[i]);
}
_oops_do_bound = -1;
}
bool ConcurrentMark::not_yet_marked(oop obj) const {
@ -458,8 +478,8 @@ bool ConcurrentMark::not_yet_marked(oop obj) const {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
size_t ConcurrentMark::scale_parallel_threads(size_t n_par_threads) {
return MAX2((n_par_threads + 2) / 4, (size_t)1);
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
return MAX2((n_par_threads + 2) / 4, 1U);
}
ConcurrentMark::ConcurrentMark(ReservedSpace rs,
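
The scaling rule above works out to roughly one concurrent marking thread per four parallel GC threads, never fewer than one. A standalone sketch that simply evaluates the formula for a few thread counts:

#include <algorithm>
#include <cstdio>

// Same arithmetic as scale_parallel_threads() above, outside HotSpot.
static unsigned scale_parallel_threads_sketch(unsigned n_par_threads) {
  return std::max((n_par_threads + 2) / 4, 1u);
}

int main() {
  const unsigned inputs[] = {1, 4, 8, 13, 16};
  for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
    std::printf("%u parallel GC threads -> %u marking threads\n",
                inputs[i], scale_parallel_threads_sketch(inputs[i]));
  }
  return 0;
}
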
@ -486,7 +506,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_regionStack(),
// _finger set in set_non_marking_state
_max_task_num(MAX2(ParallelGCThreads, (size_t)1)),
_max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
// _active_tasks set in set_non_marking_state
// _tasks set inside the constructor
_task_queues(new CMTaskQueueSet((int) _max_task_num)),
@ -506,7 +526,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_cleanup_times(),
_total_counting_time(0.0),
_total_rs_scrub_time(0.0),
_parallel_workers(NULL) {
CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
if (verbose_level < no_verbose) {
@ -568,7 +587,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
// notice that ConcGCThreads overwrites G1MarkingOverheadPercent
// if both are set
_parallel_marking_threads = ConcGCThreads;
_parallel_marking_threads = (uint) ConcGCThreads;
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
@ -589,12 +608,12 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
double sleep_factor =
(1.0 - marking_task_overhead) / marking_task_overhead;
_parallel_marking_threads = (size_t) marking_thread_num;
_parallel_marking_threads = (uint) marking_thread_num;
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = sleep_factor;
_marking_task_overhead = marking_task_overhead;
} else {
_parallel_marking_threads = scale_parallel_threads(ParallelGCThreads);
_parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
@ -618,7 +637,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
guarantee(parallel_marking_threads() > 0, "peace of mind");
_parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
(int) _max_parallel_marking_threads, false, true);
_max_parallel_marking_threads, false, true);
if (_parallel_workers == NULL) {
vm_exit_during_initialization("Failed necessary allocation.");
} else {
@ -691,7 +710,7 @@ void ConcurrentMark::reset() {
set_concurrent_marking_in_progress();
}
void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
assert(active_tasks <= _max_task_num, "we should not have more");
_active_tasks = active_tasks;
@ -727,12 +746,8 @@ void ConcurrentMark::set_non_marking_state() {
}
ConcurrentMark::~ConcurrentMark() {
for (int i = 0; i < (int) _max_task_num; ++i) {
delete _task_queues->queue(i);
delete _tasks[i];
}
delete _task_queues;
FREE_C_HEAP_ARRAY(CMTask*, _max_task_num);
// The ConcurrentMark instance is never freed.
ShouldNotReachHere();
}
// This closure is used to mark refs into the g1 generation
@ -788,7 +803,7 @@ class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
r->note_start_of_marking(true);
r->note_start_of_marking();
}
return false;
}
@ -809,6 +824,10 @@ void ConcurrentMark::checkpointRootsInitialPre() {
// Initialise marking structures. This has to be done in a STW phase.
reset();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
}
@ -823,10 +842,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// every remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
@ -951,22 +966,9 @@ bool ForceOverflowSettings::should_force() {
}
#endif // !PRODUCT
void ConcurrentMark::grayRoot(oop p) {
HeapWord* addr = (HeapWord*) p;
// We can't really check against _heap_start and _heap_end, since it
// is possible during an evacuation pause with piggy-backed
// initial-mark that the committed space is expanded during the
// pause without CM observing this change. So the assertion below
// is a bit conservative, but better than nothing.
assert(_g1h->g1_committed().contains(addr),
"address should be within the heap bounds");
if (!_nextMarkBitMap->isMarked(addr)) {
_nextMarkBitMap->parMark(addr);
}
}
void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
guarantee(false, "grayRegionIfNecessary(): don't call this any more");
// The objects on the region have already been marked "in bulk" by
// the caller. We only need to decide whether to push the region on
// the region stack or not.
@ -1012,6 +1014,8 @@ void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
}
void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
// The object is not marked by the caller. We need to at least mark
// it and maybe push in on the stack.
@ -1048,7 +1052,7 @@ private:
ConcurrentMarkThread* _cmt;
public:
void work(int worker_i) {
void work(uint worker_id) {
assert(Thread::current()->is_ConcurrentGC_thread(),
"this should only be done by a conc GC thread");
ResourceMark rm;
@ -1057,8 +1061,8 @@ public:
ConcurrentGCThread::stsJoin();
assert((size_t) worker_i < _cm->active_tasks(), "invariant");
CMTask* the_task = _cm->task(worker_i);
assert(worker_id < _cm->active_tasks(), "invariant");
CMTask* the_task = _cm->task(worker_id);
the_task->record_start_time();
if (!_cm->has_aborted()) {
do {
@ -1076,7 +1080,7 @@ public:
double elapsed_time_sec = end_time_sec - start_time_sec;
_cm->clear_has_overflown();
bool ret = _cm->do_yield_check(worker_i);
bool ret = _cm->do_yield_check(worker_id);
jlong sleep_time_ms;
if (!_cm->has_aborted() && the_task->has_aborted()) {
@ -1105,7 +1109,7 @@ public:
ConcurrentGCThread::stsLeave();
double end_vtime = os::elapsedVTime();
_cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
_cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
}
CMConcurrentMarkingTask(ConcurrentMark* cm,
@ -1117,9 +1121,9 @@ public:
// Calculates the number of active workers for a concurrent
// phase.
size_t ConcurrentMark::calc_parallel_marking_threads() {
uint ConcurrentMark::calc_parallel_marking_threads() {
if (G1CollectedHeap::use_parallel_gc_threads()) {
size_t n_conc_workers = 0;
uint n_conc_workers = 0;
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) &&
!ForceDynamicNumberOfGCThreads)) {
@ -1159,7 +1163,7 @@ void ConcurrentMark::markFromRoots() {
assert(parallel_marking_threads() <= max_parallel_marking_threads(),
"Maximum number of marking threads exceeded");
size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
uint active_workers = MAX2(1U, parallel_marking_threads());
// Parallel task terminator is set in "set_phase()"
set_phase(active_workers, true /* concurrent */);
@ -1229,7 +1233,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
true /* expected_active */);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
@ -1503,7 +1506,7 @@ class G1ParFinalCountTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
CMBitMap* _bm;
size_t _n_workers;
uint _n_workers;
size_t *_live_bytes;
size_t *_used_bytes;
BitMap* _region_bm;
@ -1535,13 +1538,13 @@ public:
FREE_C_HEAP_ARRAY(size_t, _used_bytes);
}
void work(int i) {
void work(uint worker_id) {
CalcLiveObjectsClosure calccl(true /*final*/,
_bm, _g1h->concurrent_mark(),
_region_bm, _card_bm);
calccl.no_yield();
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&calccl, i,
_g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
(int) _n_workers,
HeapRegion::FinalCountClaimValue);
} else {
@ -1549,19 +1552,19 @@ public:
}
assert(calccl.complete(), "Shouldn't have yielded!");
assert((size_t) i < _n_workers, "invariant");
_live_bytes[i] = calccl.tot_live();
_used_bytes[i] = calccl.tot_used();
assert(worker_id < _n_workers, "invariant");
_live_bytes[worker_id] = calccl.tot_live();
_used_bytes[worker_id] = calccl.tot_used();
}
size_t live_bytes() {
size_t live_bytes = 0;
for (size_t i = 0; i < _n_workers; ++i)
for (uint i = 0; i < _n_workers; ++i)
live_bytes += _live_bytes[i];
return live_bytes;
}
size_t used_bytes() {
size_t used_bytes = 0;
for (size_t i = 0; i < _n_workers; ++i)
for (uint i = 0; i < _n_workers; ++i)
used_bytes += _used_bytes[i];
return used_bytes;
}
@ -1646,18 +1649,18 @@ public:
AbstractGangTask("G1 note end"), _g1h(g1h),
_max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
void work(int i) {
void work(uint worker_id) {
double start = os::elapsedTime();
FreeRegionList local_cleanup_list("Local Cleanup List");
OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
HRRSCleanupTask hrrs_cleanup_task;
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
&old_proxy_set,
&humongous_proxy_set,
&hrrs_cleanup_task);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
_g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
_g1h->workers()->active_workers(),
HeapRegion::NoteEndClaimValue);
} else {
@ -1701,8 +1704,8 @@ public:
double end = os::elapsedTime();
if (G1PrintParCleanupStats) {
gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
"claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
i, start, end, (end-start)*1000.0,
"claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
worker_id, start, end, (end-start)*1000.0,
g1_note_end.regions_claimed(),
g1_note_end.claimed_region_time_sec()*1000.0,
g1_note_end.max_region_time_sec()*1000.0);
@ -1724,9 +1727,9 @@ public:
_region_bm(region_bm), _card_bm(card_bm)
{}
void work(int i) {
void work(uint worker_id) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1rs->scrub_par(_region_bm, _card_bm, i,
_g1rs->scrub_par(_region_bm, _card_bm, worker_id,
HeapRegion::ScrubRemSetClaimValue);
} else {
_g1rs->scrub(_region_bm, _card_bm);
@ -1766,7 +1769,7 @@ void ConcurrentMark::cleanup() {
HeapRegionRemSet::reset_for_cleanup_tasks();
size_t n_workers;
uint n_workers;
// Do counting once more with the world stopped for good measure.
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@ -1778,7 +1781,7 @@ void ConcurrentMark::cleanup() {
g1h->set_par_threads();
n_workers = g1h->n_par_threads();
assert(g1h->n_par_threads() == (int) n_workers,
assert(g1h->n_par_threads() == n_workers,
"Should not have been reset");
g1h->workers()->run_task(&g1_par_count_task);
// Done with the parallel phase so reset to 0.
@ -1884,10 +1887,6 @@ void ConcurrentMark::cleanup() {
double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0);
// G1CollectedHeap::heap()->print();
// gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
// G1CollectedHeap::heap()->get_gc_time_stamp());
if (PrintGC || PrintGCDetails) {
g1h->print_size_transition(gclog_or_tty,
start_used_bytes,
@ -2169,13 +2168,13 @@ public:
AbstractGangTask("Process reference objects in parallel"),
_proc_task(proc_task), _g1h(g1h), _cm(cm) { }
virtual void work(int i) {
CMTask* marking_task = _cm->task(i);
virtual void work(uint worker_id) {
CMTask* marking_task = _cm->task(worker_id);
G1CMIsAliveClosure g1_is_alive(_g1h);
G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
_proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
_proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
}
};
@ -2201,8 +2200,8 @@ public:
AbstractGangTask("Enqueue reference objects in parallel"),
_enq_task(enq_task) { }
virtual void work(int i) {
_enq_task.work(i);
virtual void work(uint worker_id) {
_enq_task.work(worker_id);
}
};
@ -2249,8 +2248,8 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
active_workers = MAX2(MIN2(active_workers, _max_task_num), 1U);
G1CMRefProcTaskExecutor par_task_executor(g1h, this,
g1h->workers(), active_workers);
@ -2314,11 +2313,11 @@ private:
ConcurrentMark *_cm;
public:
void work(int worker_i) {
void work(uint worker_id) {
// Since all available tasks are actually started, we should
// only proceed if we're supposed to be active.
if ((size_t)worker_i < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_i);
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
do {
task->do_marking_step(1000000000.0 /* something very large */,
@ -2347,10 +2346,10 @@ void ConcurrentMark::checkpointRootsFinalWork() {
if (G1CollectedHeap::use_parallel_gc_threads()) {
G1CollectedHeap::StrongRootsScope srs(g1h);
// this is remark, so we'll use up all active threads
int active_workers = g1h->workers()->active_workers();
uint active_workers = g1h->workers()->active_workers();
if (active_workers == 0) {
assert(active_workers > 0, "Should have been set earlier");
active_workers = ParallelGCThreads;
active_workers = (uint) ParallelGCThreads;
g1h->workers()->set_active_workers(active_workers);
}
set_phase(active_workers, false /* concurrent */);
@ -2366,7 +2365,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
} else {
G1CollectedHeap::StrongRootsScope srs(g1h);
// this is remark, so we'll use up all available threads
int active_workers = 1;
uint active_workers = 1;
set_phase(active_workers, false /* concurrent */);
CMRemarkTask remarkTask(this, active_workers);
@ -2674,6 +2673,8 @@ void ConcurrentMark::deal_with_reference(oop obj) {
}
void ConcurrentMark::drainAllSATBBuffers() {
guarantee(false, "drainAllSATBBuffers(): don't call this any more");
CMGlobalObjectClosure oc(this);
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
satb_mq_set.set_closure(&oc);
@ -2692,12 +2693,6 @@ void ConcurrentMark::drainAllSATBBuffers() {
assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
}
void ConcurrentMark::markPrev(oop p) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p);
}
void ConcurrentMark::clear(oop p) {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
@ -2707,13 +2702,21 @@ void ConcurrentMark::clear(oop p) {
_nextMarkBitMap->clear(addr);
}
void ConcurrentMark::clearRangeBothMaps(MemRegion mr) {
void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
_nextMarkBitMap->clearRange(mr);
}
void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
clearRangePrevBitmap(mr);
clearRangeNextBitmap(mr);
}
HeapRegion*
ConcurrentMark::claim_region(int task_num) {
// "checkpoint" the finger
@ -2808,6 +2811,9 @@ ConcurrentMark::claim_region(int task_num) {
}
bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
guarantee(false, "invalidate_aborted_regions_in_cset(): "
"don't call this any more");
bool result = false;
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* the_task = _tasks[i];
@ -2859,25 +2865,136 @@ void ConcurrentMark::oops_do(OopClosure* cl) {
// ...then over the contents of the all the task queues.
queue->oops_do(cl);
}
// Invalidate any entries, that are in the region stack, that
// point into the collection set
if (_regionStack.invalidate_entries_into_cset()) {
// otherwise, any gray objects copied during the evacuation pause
// might not be visited.
assert(_should_gray_objects, "invariant");
}
// Invalidate any aborted regions, recorded in the individual CM
// tasks, that point into the collection set.
if (invalidate_aborted_regions_in_cset()) {
// otherwise, any gray objects copied during the evacuation pause
// might not be visited.
assert(_should_gray_objects, "invariant");
}
}
#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
VerifyNoCSetOopsStack,
VerifyNoCSetOopsQueues,
VerifyNoCSetOopsSATBCompleted,
VerifyNoCSetOopsSATBThread
};
class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
private:
G1CollectedHeap* _g1h;
VerifyNoCSetOopsPhase _phase;
int _info;
const char* phase_str() {
switch (_phase) {
case VerifyNoCSetOopsStack: return "Stack";
case VerifyNoCSetOopsQueues: return "Queue";
case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
default: ShouldNotReachHere();
}
return NULL;
}
void do_object_work(oop obj) {
guarantee(!_g1h->obj_in_cs(obj),
err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
(void*) obj, phase_str(), _info));
}
public:
VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
_phase = phase;
_info = info;
}
virtual void do_oop(oop* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
do_object_work(obj);
}
virtual void do_oop(narrowOop* p) {
// We should not come across narrow oops while scanning marking
// stacks and SATB buffers.
ShouldNotReachHere();
}
virtual void do_object(oop obj) {
do_object_work(obj);
}
};
void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) {
return;
}
VerifyNoCSetOopsClosure cl;
if (verify_stacks) {
// Verify entries on the global mark stack
cl.set_phase(VerifyNoCSetOopsStack);
_markStack.oops_do(&cl);
// Verify entries on the task queues
for (int i = 0; i < (int) _max_task_num; i += 1) {
cl.set_phase(VerifyNoCSetOopsQueues, i);
OopTaskQueue* queue = _task_queues->queue(i);
queue->oops_do(&cl);
}
}
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
// Verify entries on the enqueued SATB buffers
if (verify_enqueued_buffers) {
cl.set_phase(VerifyNoCSetOopsSATBCompleted);
satb_qs.iterate_completed_buffers_read_only(&cl);
}
// Verify entries on the per-thread SATB buffers
if (verify_thread_buffers) {
cl.set_phase(VerifyNoCSetOopsSATBThread);
satb_qs.iterate_thread_buffers_read_only(&cl);
}
if (verify_fingers) {
// Verify the global finger
HeapWord* global_finger = finger();
if (global_finger != NULL && global_finger < _heap_end) {
// The global finger always points to a heap region boundary. We
// use heap_region_containing_raw() to get the containing region
// given that the global finger could be pointing to a free region
// which subsequently becomes continues humongous. If that
// happens, heap_region_containing() will return the bottom of the
// corresponding starts humongous region and the check below will
// not hold any more.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_finger == global_hr->bottom(),
err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
global_finger, HR_FORMAT_PARAMS(global_hr)));
}
// Verify the task fingers
assert(parallel_marking_threads() <= _max_task_num, "sanity");
for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
CMTask* task = _tasks[i];
HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap_end) {
// See above note on the global finger verification.
HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
guarantee(task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
task_finger, HR_FORMAT_PARAMS(task_hr)));
}
}
}
}
#endif // PRODUCT
void ConcurrentMark::clear_marking_state(bool clear_overflow) {
_markStack.setEmpty();
_markStack.clear_overflow();
@ -2921,7 +3038,7 @@ class CSetMarkOopClosure: public OopClosure {
int _ms_size;
int _ms_ind;
int _array_increment;
int _worker_i;
uint _worker_id;
bool push(oop obj, int arr_ind = 0) {
if (_ms_ind == _ms_size) {
@ -2971,7 +3088,7 @@ class CSetMarkOopClosure: public OopClosure {
}
public:
CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_bm(cm->nextMarkBitMap()),
@ -2979,7 +3096,7 @@ public:
_ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
_array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
_array_increment(MAX2(ms_size/8, 16)),
_worker_i(worker_i) { }
_worker_id(worker_id) { }
~CSetMarkOopClosure() {
FREE_C_HEAP_ARRAY(oop, _ms);
@ -3024,14 +3141,14 @@ class CSetMarkBitMapClosure: public BitMapClosure {
CMBitMap* _bitMap;
ConcurrentMark* _cm;
CSetMarkOopClosure _oop_cl;
int _worker_i;
uint _worker_id;
public:
CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
_g1h(G1CollectedHeap::heap()),
_bitMap(cm->nextMarkBitMap()),
_oop_cl(cm, ms_size, worker_i),
_worker_i(worker_i) { }
_oop_cl(cm, ms_size, worker_id),
_worker_id(worker_id) { }
bool do_bit(size_t offset) {
// convert offset into a HeapWord*
@ -3056,17 +3173,17 @@ public:
class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
CMBitMap* _bm;
CSetMarkBitMapClosure _bit_cl;
int _worker_i;
uint _worker_id;
enum SomePrivateConstants {
MSSize = 1000
};
public:
CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_i) :
CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
_bm(cm->nextMarkBitMap()),
_bit_cl(cm, MSSize, worker_i),
_worker_i(worker_i) { }
_bit_cl(cm, MSSize, worker_id),
_worker_id(worker_id) { }
bool doHeapRegion(HeapRegion* hr) {
if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
@ -3085,19 +3202,6 @@ public:
}
};
class SetClaimValuesInCSetHRClosure: public HeapRegionClosure {
jint _claim_value;
public:
SetClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value) { }
bool doHeapRegion(HeapRegion* hr) {
hr->set_claim_value(_claim_value);
return false;
}
};
class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@ -3109,14 +3213,17 @@ public:
AbstractGangTask("Complete Mark in CSet"),
_g1h(g1h), _cm(cm) { }
void work(int worker_i) {
CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
void work(uint worker_id) {
CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &cmplt);
}
};
void ConcurrentMark::complete_marking_in_collection_set() {
guarantee(false, "complete_marking_in_collection_set(): "
"don't call this any more");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (!g1h->mark_in_progress()) {
@ -3140,9 +3247,8 @@ void ConcurrentMark::complete_marking_in_collection_set() {
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
// Now reset the claim values in the regions in the collection set.
SetClaimValuesInCSetHRClosure set_cv_cl(HeapRegion::InitialClaimValue);
g1h->collection_set_iterate(&set_cv_cl);
// Reset the claim values in the regions in the collection set.
g1h->reset_cset_heap_region_claim_values();
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
@ -3165,6 +3271,8 @@ void ConcurrentMark::complete_marking_in_collection_set() {
// newCSet().
void ConcurrentMark::newCSet() {
guarantee(false, "newCSet(): don't call this any more");
if (!concurrent_marking_in_progress()) {
// nothing to do if marking is not in progress
return;
@ -3203,6 +3311,8 @@ void ConcurrentMark::newCSet() {
}
void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
guarantee(false, "registerCSetRegion(): don't call this any more");
if (!concurrent_marking_in_progress()) return;
HeapWord* region_end = hr->end();
@ -3214,6 +3324,9 @@ void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
// Resets the region fields of active CMTasks whose values point
// into the collection set.
void ConcurrentMark::reset_active_task_region_fields_in_cset() {
guarantee(false, "reset_active_task_region_fields_in_cset(): "
"don't call this any more");
assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
assert(parallel_marking_threads() <= _max_task_num, "sanity");
@ -3307,13 +3420,13 @@ void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
// the CMS bit map. Called at the first checkpoint.
// We take a break if someone is trying to stop the world.
bool ConcurrentMark::do_yield_check(int worker_i) {
bool ConcurrentMark::do_yield_check(uint worker_id) {
if (should_yield()) {
if (worker_i == 0) {
if (worker_id == 0) {
_g1h->g1_policy()->record_concurrent_pause();
}
cmThread()->yield();
if (worker_i == 0) {
if (worker_id == 0) {
_g1h->g1_policy()->record_concurrent_pause_end();
}
return true;
@ -3924,6 +4037,10 @@ void CMTask::drain_satb_buffers() {
}
void CMTask::drain_region_stack(BitMapClosure* bc) {
assert(_cm->region_stack_empty(), "region stack should be empty");
assert(_aborted_region.is_empty(), "aborted region should be empty");
return;
if (has_aborted()) return;
assert(_region_finger == NULL,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -166,10 +166,10 @@ class CMBitMap : public CMBitMapRO {
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
ConcurrentMark* _cm;
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
jint _oops_do_bound; // Number of elements to include in next iteration.
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
jint _saved_index; // value of _index saved at start of GC
NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
bool _overflow;
@ -247,16 +247,12 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
void setEmpty() { _index = 0; clear_overflow(); }
// Record the current size; a subsequent "oops_do" will iterate only over
// indices valid at the time of this call.
void set_oops_do_bound(jint bound = -1) {
if (bound == -1) {
_oops_do_bound = _index;
} else {
_oops_do_bound = bound;
}
}
jint oops_do_bound() { return _oops_do_bound; }
// Record the current index.
void note_start_of_gc();
// Make sure that we have not added any entries to the stack during GC.
void note_end_of_gc();
// iterate over the oops in the mark stack, up to the bound recorded via
// the call above.
void oops_do(OopClosure* f);
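As a rough illustration of what replaces the old set_oops_do_bound()/oops_do_bound() pair: note_start_of_gc() snapshots the stack index into _saved_index and note_end_of_gc() checks that nothing was pushed during the pause. A minimal standalone sketch of that saved-index check, assuming a simplified stack type (SimpleMarkStack, push() and main() below are illustrative stand-ins, not the CMMarkStack API):

#include <cassert>
#include <cstddef>

// Stand-in for CMMarkStack: just enough state to show the saved-index
// check introduced by this change.
class SimpleMarkStack {
  void*  _base[1024];
  size_t _index;        // one past the last occupied slot
  size_t _saved_index;  // value of _index captured at the start of a GC
public:
  SimpleMarkStack() : _index(0), _saved_index(0) {}

  void push(void* p) { _base[_index++] = p; }

  // Record the current index at the start of a GC pause.
  void note_start_of_gc() { _saved_index = _index; }

  // Verify that no entries were added to the stack during the pause.
  void note_end_of_gc() {
    assert(_index == _saved_index && "mark stack changed during GC");
  }
};

int main() {
  SimpleMarkStack s;
  s.push(NULL);
  s.note_start_of_gc();
  // ... a GC pause that must not push onto the mark stack ...
  s.note_end_of_gc();   // passes as long as _index is unchanged
  return 0;
}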
@ -374,9 +370,9 @@ class ConcurrentMark: public CHeapObj {
protected:
ConcurrentMarkThread* _cmThread; // the thread doing the work
G1CollectedHeap* _g1h; // the heap.
size_t _parallel_marking_threads; // the number of marking
uint _parallel_marking_threads; // the number of marking
// threads we're using
size_t _max_parallel_marking_threads; // max number of marking
uint _max_parallel_marking_threads; // max number of marking
// threads we'll ever use
double _sleep_factor; // how much we have to sleep, with
// respect to the work we just did, to
@ -412,8 +408,8 @@ protected:
// last claimed region
// marking tasks
size_t _max_task_num; // maximum task number
size_t _active_tasks; // task num currently active
uint _max_task_num; // maximum task number
uint _active_tasks; // task num currently active
CMTask** _tasks; // task queue array (max_task_num len)
CMTaskQueueSet* _task_queues; // task queue set
ParallelTaskTerminator _terminator; // for termination
@ -492,7 +488,7 @@ protected:
// It should be called to indicate which phase we're in (concurrent
// mark or remark) and how many threads are currently active.
void set_phase(size_t active_tasks, bool concurrent);
void set_phase(uint active_tasks, bool concurrent);
// We do this after we're done with marking so that the marking data
// structures are initialised to a sensible and predictable state.
void set_non_marking_state();
@ -505,8 +501,8 @@ protected:
}
// accessor methods
size_t parallel_marking_threads() { return _parallel_marking_threads; }
size_t max_parallel_marking_threads() { return _max_parallel_marking_threads;}
uint parallel_marking_threads() { return _parallel_marking_threads; }
uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
double sleep_factor() { return _sleep_factor; }
double marking_task_overhead() { return _marking_task_overhead;}
double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
@ -514,7 +510,7 @@ protected:
HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
size_t active_tasks() { return _active_tasks; }
uint active_tasks() { return _active_tasks; }
ParallelTaskTerminator* terminator() { return &_terminator; }
// It claims the next available region to be scanned by a marking
@ -715,19 +711,18 @@ public:
// Returns the number of GC threads to be used in a concurrent
// phase based on the number of GC threads being used in a STW
// phase.
size_t scale_parallel_threads(size_t n_par_threads);
uint scale_parallel_threads(uint n_par_threads);
// Calculates the number of GC threads to be used in a concurrent phase.
size_t calc_parallel_marking_threads();
uint calc_parallel_marking_threads();
// The following three are interaction between CM and
// G1CollectedHeap
// This notifies CM that a root during initial-mark needs to be
// grayed and it's MT-safe. Currently, we just mark it. But, in the
// future, we can experiment with pushing it on the stack and we can
// do this without changing G1CollectedHeap.
void grayRoot(oop p);
// grayed. It is MT-safe.
inline void grayRoot(oop obj, size_t word_size);
// It's used during evacuation pauses to gray a region, if
// necessary, and it's MT-safe. It assumes that the caller has
// marked any objects on that region. If _should_gray_objects is
@ -735,6 +730,7 @@ public:
// pushed on the region stack, if it is located below the global
// finger, otherwise we do nothing.
void grayRegionIfNecessary(MemRegion mr);
// It's used during evacuation pauses to mark and, if necessary,
// gray a single object and it's MT-safe. It assumes the caller did
// not mark the object. If _should_gray_objects is true and we're
@ -791,24 +787,40 @@ public:
// Mark in the previous bitmap. NB: this is usually read-only, so use
// this carefully!
void markPrev(oop p);
inline void markPrev(oop p);
inline void markNext(oop p);
void clear(oop p);
// Clears marks for all objects in the given range, for both prev and
// next bitmaps. NB: the previous bitmap is usually read-only, so use
// this carefully!
void clearRangeBothMaps(MemRegion mr);
// Clears marks for all objects in the given range, for the prev,
// next, or both bitmaps. NB: the previous bitmap is usually
// read-only, so use this carefully!
void clearRangePrevBitmap(MemRegion mr);
void clearRangeNextBitmap(MemRegion mr);
void clearRangeBothBitmaps(MemRegion mr);
// Record the current top of the mark and region stacks; a
// subsequent oops_do() on the mark stack and
// invalidate_entries_into_cset() on the region stack will iterate
// only over indices valid at the time of this call.
void set_oops_do_bound() {
_markStack.set_oops_do_bound();
_regionStack.set_oops_do_bound();
// Notify data structures that a GC has started.
void note_start_of_gc() {
_markStack.note_start_of_gc();
}
// Notify data structures that a GC is finished.
void note_end_of_gc() {
_markStack.note_end_of_gc();
}
// Iterate over the oops in the mark stack and all local queues. It
// also calls invalidate_entries_into_cset() on the region stack.
void oops_do(OopClosure* f);
// Verify that there are no CSet oops on the stacks (taskqueues /
// global mark stack), enqueued SATB buffers, per-thread SATB
// buffers, and fingers (global / per-task). The boolean parameters
// decide which of the above data structures to verify. If marking
// is not in progress, it's a no-op.
void verify_no_cset_oops(bool verify_stacks,
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
// It is called at the end of an evacuation pause during marking so
// that CM is notified of where the new end of the heap is. It
// doesn't do anything if concurrent_marking_in_progress() is false,
@ -873,7 +885,7 @@ public:
return _prevMarkBitMap->isMarked(addr);
}
inline bool do_yield_check(int worker_i = 0);
inline bool do_yield_check(uint worker_i = 0);
inline bool should_yield();
// Called to abort the marking cycle after a Full GC takes place.
@ -1166,6 +1178,7 @@ public:
// It keeps picking SATB buffers and processing them until no SATB
// buffers are available.
void drain_satb_buffers();
// It keeps popping regions from the region stack and processing
// them until the region stack is empty.
void drain_region_stack(BitMapClosure* closure);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -153,4 +153,46 @@ inline void CMTask::deal_with_reference(oop obj) {
}
}
inline void ConcurrentMark::markPrev(oop p) {
assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}
inline void ConcurrentMark::markNext(oop p) {
assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
_nextMarkBitMap->mark((HeapWord*) p);
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
HeapWord* addr = (HeapWord*) obj;
// Currently we don't do anything with word_size but we will use it
// in the very near future in the liveness calculation piggy-backing
// changes.
#ifdef ASSERT
HeapRegion* hr = _g1h->heap_region_containing(addr);
assert(hr != NULL, "sanity");
assert(!hr->is_survivor(), "should not allocate survivors during IM");
assert(addr < hr->next_top_at_mark_start(),
err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
// We cannot assert that word_size == obj->size() given that obj
// might not be in a consistent state (another thread might be in
// the process of copying it). So the best thing we can do is to
// assert that word_size is under an upper bound which is its
// containing region's capacity.
assert(word_size * HeapWordSize <= hr->capacity(),
err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr)));
#endif // ASSERT
if (!_nextMarkBitMap->isMarked(addr)) {
_nextMarkBitMap->parMark(addr);
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
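grayRoot() above checks the bit before setting it and relies on parMark() being safe to call from several GC workers that may gray the same root concurrently. A minimal standalone sketch of that check-then-atomically-mark pattern, with std::atomic standing in for the real bitmap (TinyBitMap, gray_root() and the index-based addressing are illustrative assumptions, not the CMBitMap API):

#include <atomic>
#include <cstddef>
#include <stdint.h>

// Tiny stand-in for the next-mark bitmap: one bit per heap "word".
class TinyBitMap {
  std::atomic<uint64_t> _bits[16];              // covers 1024 words
public:
  TinyBitMap() { for (size_t i = 0; i < 16; i++) _bits[i].store(0); }

  bool isMarked(size_t idx) const {
    return (_bits[idx / 64].load(std::memory_order_relaxed) >> (idx % 64)) & 1;
  }

  // Atomic set; returns true only if this call actually set the bit.
  bool parMark(size_t idx) {
    uint64_t mask = uint64_t(1) << (idx % 64);
    uint64_t old  = _bits[idx / 64].fetch_or(mask, std::memory_order_relaxed);
    return (old & mask) == 0;
  }
};

// The grayRoot() shape: cheap unsynchronized check, then an atomic mark,
// so concurrent callers graying the same object remain correct.
void gray_root(TinyBitMap& bm, size_t obj_idx) {
  if (!bm.isMarked(obj_idx)) {
    bm.parMark(obj_idx);
  }
}

int main() {
  TinyBitMap bm;
  gray_root(bm, 42);
  gray_root(bm, 42);        // second call is a harmless no-op
  return bm.isMarked(42) ? 0 : 1;
}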

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -285,6 +285,14 @@ private:
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
// It specifies whether we should attempt to expand the heap after a
// region allocation failure. If heap expansion fails we set this to
// false so that we don't re-attempt the heap expansion (it's likely
// that subsequent expansion attempts will also fail if one fails).
// Currently, it is only consulted during GC and it's reset at the
// start of each GC.
bool _expand_heap_after_alloc_failure;
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
@ -861,8 +869,7 @@ protected:
void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
bool should_mark_root);
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
// ("Weak") Reference processing support.
@ -954,7 +961,7 @@ protected:
unsigned int* _worker_cset_start_region_time_stamp;
enum G1H_process_strong_roots_tasks {
G1H_PS_mark_stack_oops_do,
G1H_PS_filter_satb_buffers,
G1H_PS_refProcessor_oops_do,
// Leave this one last.
G1H_PS_NumElements
@ -995,7 +1002,7 @@ public:
// Initialize weak reference processing.
virtual void ref_processing_init();
void set_par_threads(int t) {
void set_par_threads(uint t) {
SharedHeap::set_par_threads(t);
// Done in SharedHeap but oddly there are
// two _process_strong_tasks's in a G1CollectedHeap
@ -1298,13 +1305,17 @@ public:
// chunk.) For now requires that "doHeapRegion" always returns "false",
// i.e., that a closure never attempt to abort a traversal.
void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
int worker,
int no_of_par_workers,
uint worker,
uint no_of_par_workers,
jint claim_value);
// It resets all the region claim values to the default.
void reset_heap_region_claim_values();
// Resets the claim values of regions in the current
// collection set to the default.
void reset_cset_heap_region_claim_values();
#ifdef ASSERT
bool check_heap_region_claim_values(jint claim_value);
@ -1740,10 +1751,8 @@ public:
_gclab_word_size(gclab_word_size),
_real_start_word(NULL),
_real_end_word(NULL),
_start_word(NULL)
{
guarantee( size_in_words() >= bitmap_size_in_words(),
"just making sure");
_start_word(NULL) {
guarantee(false, "GCLabBitMap::GCLabBitmap(): don't call this any more");
}
inline unsigned heapWordToOffset(HeapWord* addr) {
@ -1797,6 +1806,8 @@ public:
}
void set_buffer(HeapWord* start) {
guarantee(false, "set_buffer(): don't call this any more");
guarantee(use_local_bitmaps, "invariant");
clear();
@ -1820,6 +1831,8 @@ public:
#endif // PRODUCT
void retire() {
guarantee(false, "retire(): don't call this any more");
guarantee(use_local_bitmaps, "invariant");
assert(fields_well_formed(), "invariant");
@ -1853,32 +1866,18 @@ public:
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
bool _should_mark_objects;
GCLabBitMap _bitmap;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
inline bool mark(HeapWord* addr) {
guarantee(use_local_bitmaps, "invariant");
assert(_should_mark_objects, "invariant");
return _bitmap.mark(addr);
}
inline void set_buf(HeapWord* buf) {
if (use_local_bitmaps && _should_mark_objects) {
_bitmap.set_buffer(buf);
}
void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
inline void retire(bool end_of_gc, bool retain) {
void retire(bool end_of_gc, bool retain) {
if (_retired)
return;
if (use_local_bitmaps && _should_mark_objects) {
_bitmap.retire();
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -136,7 +136,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_stop_world_start(0.0),
_all_stop_world_times_ms(new NumberSeq()),
_all_yield_times_ms(new NumberSeq()),
_using_new_ratio_calculations(false),
_summary(new Summary()),
@ -230,7 +229,9 @@ G1CollectorPolicy::G1CollectorPolicy() :
_inc_cset_bytes_used_before(0),
_inc_cset_max_finger(NULL),
_inc_cset_recorded_rs_lengths(0),
_inc_cset_recorded_rs_lengths_diffs(0),
_inc_cset_predicted_elapsed_time_ms(0.0),
_inc_cset_predicted_elapsed_time_ms_diffs(0.0),
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
@ -280,7 +281,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
_par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
_par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
_par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
@ -407,11 +408,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
initialize_all();
_collectionSetChooser = new CollectionSetChooser();
}
// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
i++; if (i == len) i = 0;
_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
void G1CollectorPolicy::initialize_flags() {
@ -423,39 +420,74 @@ void G1CollectorPolicy::initialize_flags() {
CollectorPolicy::initialize_flags();
}
// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameteres is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
size_t size_to_region_num(size_t byte_size) {
return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_sizer_kind = SizerNewRatio;
_adaptive_size = false;
return;
}
}
public:
G1YoungGenSizer() {
initialize_flags();
initialize_size_info();
if (FLAG_IS_CMDLINE(NewSize)) {
_min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
_sizer_kind = SizerMaxAndNewSize;
_adaptive_size = _min_desired_young_length == _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
}
} else if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
_sizer_kind = SizerMaxNewSizeOnly;
}
size_t min_young_region_num() {
return size_to_region_num(_min_gen0_size);
}
size_t initial_young_region_num() {
return size_to_region_num(_initial_gen0_size);
}
size_t max_young_region_num() {
return size_to_region_num(_max_gen0_size);
}
};
}
void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
assert(number_of_heap_regions > 0, "Heap must be initialized");
size_t young_size = number_of_heap_regions / (NewRatio + 1);
_min_desired_young_length = young_size;
_max_desired_young_length = young_size;
size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
return MAX2((size_t)1, default_value);
}
size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
return MAX2((size_t)1, default_value);
}
void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
assert(new_number_of_heap_regions > 0, "Heap must be initialized");
switch (_sizer_kind) {
case SizerDefaults:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
break;
case SizerNewSizeOnly:
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
_max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
break;
case SizerMaxNewSizeOnly:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
break;
case SizerMaxAndNewSize:
// Do nothing. Values set on the command line, don't update them at runtime.
break;
case SizerNewRatio:
_min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
_max_desired_young_length = _min_desired_young_length;
break;
default:
ShouldNotReachHere();
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}
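As a worked example of the default path above (all numbers illustrative): with the SizerDefaults kind and the shipped G1DefaultMinNewGenPercent=20 / G1DefaultMaxNewGenPercent=80, a heap_size_changed() call reporting 1000 regions yields _min_desired_young_length = MAX2(1, 1000 * 20 / 100) = 200 regions and _max_desired_young_length = MAX2(1, 1000 * 80 / 100) = 800 regions, while the SizerNewRatio kind with -XX:NewRatio=3 would fix both bounds at 1000 / (3 + 1) = 250 regions.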
void G1CollectorPolicy::init() {
@ -466,28 +498,10 @@ void G1CollectorPolicy::init() {
initialize_gc_policy_counters();
G1YoungGenSizer sizer;
_min_desired_young_length = sizer.min_young_region_num();
_max_desired_young_length = sizer.max_young_region_num();
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
// Treat NewRatio as a fixed size that is only recalculated when the heap size changes
update_young_list_size_using_newratio(_g1->n_regions());
_using_new_ratio_calculations = true;
}
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
if (adaptive_young_list_length()) {
_young_list_fixed_length = 0;
} else {
assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ");
_young_list_fixed_length = _min_desired_young_length;
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
}
_free_regions_at_end_of_collection = _g1->free_regions();
update_young_list_target_length();
@ -541,11 +555,7 @@ void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
// smaller than 1.0) we'll get 1.
_reserve_regions = (size_t) ceil(reserve_regions_d);
if (_using_new_ratio_calculations) {
// -XX:NewRatio was specified so we need to update the
// young gen length when the heap size has changed.
update_young_list_size_using_newratio(new_number_of_regions);
}
_young_gen_sizer->heap_size_changed(new_number_of_regions);
}
size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
@ -563,14 +573,14 @@ size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
}
desired_min_length += base_min_length;
// make sure we don't go below any user-defined minimum bound
return MAX2(_min_desired_young_length, desired_min_length);
return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
// Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound.
return _max_desired_young_length;
return _young_gen_sizer->max_desired_young_length();
}
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
@ -895,10 +905,19 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
}
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. so, no point is calculating this
// every time we calculate / recalculate the target young length.
update_survivors_policy();
if (!during_initial_mark_pause()) {
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. So there is no point in calculating this
// every time we calculate / recalculate the target young length.
update_survivors_policy();
} else {
// The marking phase has a "we only copy implicitly live
// objects during marking" invariant. The easiest way to ensure it
// holds is not to allocate any survivor regions and tenure all
// objects. In the future we might change this and handle survivor
// regions specially during marking.
tenure_all_objects();
}
assert(_g1->used() == _g1->recalculate_used(),
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
@ -929,7 +948,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
for (int i = 0; i < _parallel_gc_threads; ++i) {
_par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_mark_stack_scan_times_ms[i] = -1234.0;
_par_last_satb_filtering_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -1234.0;
@ -1217,7 +1236,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
// of the PrintGCDetails output, in the non-parallel case.
double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
double update_rs_time = avg_value(_par_last_update_rs_times_ms);
double update_rs_processed_buffers =
sum_of_values(_par_last_update_rs_processed_buffers);
@ -1226,7 +1245,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
double termination_time = avg_value(_par_last_termination_times_ms);
double known_time = ext_root_scan_time +
mark_stack_scan_time +
satb_filtering_time +
update_rs_time +
scan_rs_time +
obj_copy_time;
@ -1272,7 +1291,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
body_summary->record_satb_filtering_time_ms(satb_filtering_time);
body_summary->record_update_rs_time_ms(update_rs_time);
body_summary->record_scan_rs_time_ms(scan_rs_time);
body_summary->record_obj_copy_time_ms(obj_copy_time);
@ -1366,16 +1385,12 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
(last_pause_included_initial_mark) ? " (initial-mark)" : "",
elapsed_ms / 1000.0);
if (print_marking_info) {
print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
}
if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
if (print_marking_info) {
print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
}
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
@ -1389,7 +1404,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
_par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
_par_last_mark_stack_scan_times_ms[i] +
_par_last_satb_filtering_times_ms[i] +
_par_last_update_rs_times_ms[i] +
_par_last_scan_rs_times_ms[i] +
_par_last_obj_copy_times_ms[i] +
@ -1402,7 +1417,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
} else {
print_stats(1, "Ext Root Scanning", ext_root_scan_time);
if (print_marking_info) {
print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
print_stats(1, "SATB Filtering", satb_filtering_time);
}
print_stats(1, "Update RS", update_rs_time);
print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
@ -1551,10 +1566,19 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
}
}
// It turns out that, sometimes, _max_rs_lengths can get smaller
// than _recorded_rs_lengths which causes rs_length_diff to get
// very large and mess up the RSet length predictions. We'll be
// defensive until we work out why this happens.
// This is defensive. For a while _max_rs_lengths could get
// smaller than _recorded_rs_lengths which was causing
// rs_length_diff to get very large and mess up the RSet length
// predictions. The reason was unsafe concurrent updates to the
// _inc_cset_recorded_rs_lengths field which the code below guards
// against (see CR 7118202). This bug has now been fixed (see CR
// 7119027). However, I'm still worried that
// _inc_cset_recorded_rs_lengths might still end up somewhat
// inaccurate. The concurrent refinement thread calculates an
// RSet's length concurrently with other CR threads updating it
// which might cause it to calculate the length incorrectly (if,
// say, it's in mid-coarsening). So I'll leave in the defensive
// conditional below just in case.
size_t rs_length_diff = 0;
if (_max_rs_lengths > _recorded_rs_lengths) {
rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
@ -1964,11 +1988,10 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
if (summary->get_total_seq()->num() > 0) {
print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
if (body_summary != NULL) {
print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
if (parallel) {
print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
print_summary(2, "Update RS", body_summary->get_update_rs_seq());
print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
@ -1977,7 +2000,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
{
NumberSeq* other_parts[] = {
body_summary->get_ext_root_scan_seq(),
body_summary->get_mark_stack_scan_seq(),
body_summary->get_satb_filtering_seq(),
body_summary->get_update_rs_seq(),
body_summary->get_scan_rs_seq(),
body_summary->get_obj_copy_seq(),
@ -1990,7 +2013,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
}
} else {
print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
print_summary(1, "Update RS", body_summary->get_update_rs_seq());
print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
@ -2017,7 +2040,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
body_summary->get_satb_drain_seq(),
body_summary->get_update_rs_seq(),
body_summary->get_ext_root_scan_seq(),
body_summary->get_mark_stack_scan_seq(),
body_summary->get_satb_filtering_seq(),
body_summary->get_scan_rs_seq(),
body_summary->get_obj_copy_seq()
};
@ -2321,17 +2344,19 @@ public:
_g1(G1CollectedHeap::heap())
{}
void work(int i) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
_chunk_size,
worker_id);
// Back to zero for the claim value.
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
_g1->workers()->active_workers(),
HeapRegion::InitialClaimValue);
jint regions_added = parKnownGarbageCl.marked_regions_added();
_hrSorted->incNumMarkedHeapRegions(regions_added);
if (G1PrintParCleanupStats) {
gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
i, parKnownGarbageCl.invokes(), regions_added);
worker_id, parKnownGarbageCl.invokes(), regions_added);
}
}
};
@ -2412,9 +2437,6 @@ void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
assert(_inc_cset_build_state == Active, "Precondition");
assert(!hr->is_young(), "non-incremental add of young region");
if (_g1->mark_in_progress())
_g1->concurrent_mark()->registerCSetRegion(hr);
assert(!hr->in_collection_set(), "should not already be in the CSet");
hr->set_in_collection_set(true);
hr->set_next_in_collection_set(_collection_set);
@ -2436,10 +2458,45 @@ void G1CollectorPolicy::start_incremental_cset_building() {
_inc_cset_max_finger = 0;
_inc_cset_recorded_rs_lengths = 0;
_inc_cset_predicted_elapsed_time_ms = 0;
_inc_cset_recorded_rs_lengths_diffs = 0;
_inc_cset_predicted_elapsed_time_ms = 0.0;
_inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
_inc_cset_build_state = Active;
}
void G1CollectorPolicy::finalize_incremental_cset_building() {
assert(_inc_cset_build_state == Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
// The two "main" fields, _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms, are updated by the thread
// that adds a new region to the CSet. Further updates by the
// concurrent refinement thread that samples the young RSet lengths
// are accumulated in the *_diffs fields. Here we add the diffs to
// the "main" fields.
if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
_inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
} else {
// This is defensive. The diff should in theory be always positive
// as RSets can only grow between GCs. However, given that we
// sample their size concurrently with other threads updating them
// it's possible that we might get the wrong size back, which
// could make the calculations somewhat inaccurate.
size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
if (_inc_cset_recorded_rs_lengths >= diffs) {
_inc_cset_recorded_rs_lengths -= diffs;
} else {
_inc_cset_recorded_rs_lengths = 0;
}
}
_inc_cset_predicted_elapsed_time_ms +=
_inc_cset_predicted_elapsed_time_ms_diffs;
_inc_cset_recorded_rs_lengths_diffs = 0;
_inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}
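For instance (numbers illustrative): if _inc_cset_recorded_rs_lengths is 500 and the sampling thread has left _inc_cset_recorded_rs_lengths_diffs at +40, the code above folds them into 540; if a concurrently mis-sampled RSet had instead left the diff at -600, the defensive branch clamps the result to 0 rather than letting the unsigned field wrap around.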
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
@ -2455,10 +2512,8 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
size_t used_bytes = hr->used();
_inc_cset_recorded_rs_lengths += rs_length;
_inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
_inc_cset_bytes_used_before += used_bytes;
// Cache the values we have added to the aggregated information
@ -2469,37 +2524,33 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}
void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
// This routine is currently only called as part of the updating of
// existing policy information for regions in the incremental cset that
// is performed by the concurrent refine thread(s) as part of young list
// RSet sampling. Therefore we should not be at a safepoint.
assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
assert(hr->is_young(), "it should be");
size_t used_bytes = hr->used();
size_t old_rs_length = hr->recorded_rs_length();
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
// Subtract the old recorded/predicted policy information for
// the given heap region from the collection set info.
_inc_cset_recorded_rs_lengths -= old_rs_length;
_inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
_inc_cset_bytes_used_before -= used_bytes;
// Clear the values cached in the heap region
hr->set_recorded_rs_length(0);
hr->set_predicted_elapsed_time_ms(0);
}
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
// Update the collection set information that is dependent on the new RS length
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
size_t new_rs_length) {
// Update the CSet information that is dependent on the new RS length
assert(hr->is_young(), "Precondition");
assert(!SafepointSynchronize::is_at_safepoint(),
"should not be at a safepoint");
remove_from_incremental_cset_info(hr);
add_to_incremental_cset_info(hr, new_rs_length);
// We could have updated _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
// that atomically, as this code is executed by a concurrent
// refinement thread, potentially concurrently with a mutator thread
// allocating a new region and also updating the same fields. To
// avoid the atomic operations we accumulate these updates on two
// separate fields (*_diffs) and we'll just add them to the "main"
// fields at the start of a GC.
ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
_inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
hr->set_recorded_rs_length(new_rs_length);
hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
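A minimal standalone sketch of the design choice described in the comment above: the single allocating thread updates the main totals, the concurrent sampler only touches the *_diffs fields, and the diffs are folded in at the next safepoint. CSetStats, its plain signed fields and the sample numbers below are illustrative stand-ins, not the G1CollectorPolicy API:

#include <cstdio>

// Stand-in for the incremental CSet bookkeeping.
struct CSetStats {
  long   recorded_rs_lengths;        // updated only by the allocating thread
  long   recorded_rs_lengths_diffs;  // updated only by the sampling thread
  double predicted_time_ms;
  double predicted_time_ms_diffs;

  // The (single) thread that adds a region to the CSet.
  void add_region(long rs_length, double time_ms) {
    recorded_rs_lengths += rs_length;
    predicted_time_ms   += time_ms;
  }

  // A concurrent refinement thread re-samples a region: only the diff
  // fields change, so the main fields need no atomic updates.
  void update_region(long old_rs_length, long new_rs_length,
                     double old_time_ms, double new_time_ms) {
    recorded_rs_lengths_diffs += new_rs_length - old_rs_length;
    predicted_time_ms_diffs   += new_time_ms - old_time_ms;
  }

  // At a safepoint, before the stats are consumed: fold the diffs into
  // the main fields, clamping at zero as the finalize step above does.
  void finalize_for_gc() {
    recorded_rs_lengths += recorded_rs_lengths_diffs;
    if (recorded_rs_lengths < 0) recorded_rs_lengths = 0;
    predicted_time_ms += predicted_time_ms_diffs;
    recorded_rs_lengths_diffs = 0;
    predicted_time_ms_diffs   = 0.0;
  }
};

int main() {
  CSetStats stats = {0, 0, 0.0, 0.0};
  stats.add_region(100, 2.5);                 // mutator adds a region
  stats.update_region(100, 140, 2.5, 3.0);    // sampler sees the RSet grow
  stats.finalize_for_gc();                    // safepoint: 140 and 3.0 ms
  std::printf("rs_lengths=%ld predicted=%.1f ms\n",
              stats.recorded_rs_lengths, stats.predicted_time_ms);
  return 0;
}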
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
@ -2591,6 +2642,7 @@ void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
double non_young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list();
finalize_incremental_cset_building();
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
@ -2654,9 +2706,6 @@ void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
// Clear the fields that point to the survivor list - they are all young now.
young_list->clear_survivors();
if (_g1->mark_in_progress())
_g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
_collection_set = _inc_cset_head;
_collection_set_bytes_used_before = _inc_cset_bytes_used_before;
time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class MainBodySummary: public CHeapObj {
define_num_seq(satb_drain) // optional
define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan)
define_num_seq(mark_stack_scan)
define_num_seq(satb_filtering)
define_num_seq(update_rs)
define_num_seq(scan_rs)
define_num_seq(obj_copy)
@ -83,6 +83,72 @@ public:
virtual MainBodySummary* main_body_summary() { return this; }
};
// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between
// G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
// heap size. This means that every time the heap size changes the
// limits for the young gen size will be updated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1DefaultMaxNewGenPercent
// of the heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1DefaultMinNewGenPercent
// of the heap as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
class G1YoungGenSizer : public CHeapObj {
private:
enum SizerKind {
SizerDefaults,
SizerNewSizeOnly,
SizerMaxNewSizeOnly,
SizerMaxAndNewSize,
SizerNewRatio
};
SizerKind _sizer_kind;
size_t _min_desired_young_length;
size_t _max_desired_young_length;
bool _adaptive_size;
size_t calculate_default_min_length(size_t new_number_of_heap_regions);
size_t calculate_default_max_length(size_t new_number_of_heap_regions);
public:
G1YoungGenSizer();
void heap_size_changed(size_t new_number_of_heap_regions);
size_t min_desired_young_length() {
return _min_desired_young_length;
}
size_t max_desired_young_length() {
return _max_desired_young_length;
}
bool adaptive_young_list_length() {
return _adaptive_size;
}
};
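For example (flag values illustrative): running with only -XX:NewSize=512m selects SizerNewSizeOnly, so the minimum is pinned while the maximum keeps tracking G1DefaultMaxNewGenPercent of the current heap; -XX:NewSize=1g -XX:MaxNewSize=2g selects SizerMaxAndNewSize and both bounds are taken from the command line and never recomputed; and -XX:NewRatio=3 alone selects SizerNewRatio, which fixes min and max at a quarter of the region count but recomputes them whenever the heap is resized.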
class G1CollectorPolicy: public CollectorPolicy {
private:
// either equal to the number of parallel threads, if ParallelGCThreads
@ -149,7 +215,7 @@ private:
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms;
double* _par_last_satb_filtering_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_times_ms;
@ -167,9 +233,6 @@ private:
// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;
// if true, then it tries to dynamically adjust the length of the
// young list
bool _adaptive_young_list_length;
size_t _young_list_target_length;
size_t _young_list_fixed_length;
size_t _prev_eden_capacity; // used for logging
@ -227,9 +290,7 @@ private:
TruncatedSeq* _young_gc_eff_seq;
bool _using_new_ratio_calculations;
size_t _min_desired_young_length; // as set on the command line or default calculations
size_t _max_desired_young_length; // as set on the command line or default calculations
G1YoungGenSizer* _young_gen_sizer;
size_t _eden_cset_region_length;
size_t _survivor_cset_region_length;
@ -588,16 +649,29 @@ private:
// Used to record the highest end of heap region in collection set
HeapWord* _inc_cset_max_finger;
// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
// The RSet lengths recorded for regions in the CSet. It is updated
// by the thread that adds a new region to the CSet. We assume that
// only one thread can be allocating a new CSet region (currently,
// it does so after taking the Heap_lock) hence no need to
// synchronize updates to this field.
size_t _inc_cset_recorded_rs_lengths;
// The predicted elapsed time it will take to collect the regions
// in the collection set (updated by the periodic sampling of the
// regions in the young list/collection set).
// A concurrent refinement thread periodically samples the young
// region RSets and needs to update _inc_cset_recorded_rs_lengths as
// the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add them to
// _inc_cset_recorded_rs_lengths at the start of a GC.
ssize_t _inc_cset_recorded_rs_lengths_diffs;
// The predicted elapsed time it will take to collect the regions in
// the CSet. This is updated by the thread that adds a new region to
// the CSet. See the comment for _inc_cset_recorded_rs_lengths about
// MT-safety assumptions.
double _inc_cset_predicted_elapsed_time_ms;
// See the comment for _inc_cset_recorded_rs_lengths_diffs.
double _inc_cset_predicted_elapsed_time_ms_diffs;
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
@ -682,8 +756,6 @@ private:
// Count the number of bytes used in the CS.
void count_CS_bytes_used();
void update_young_list_size_using_newratio(size_t number_of_heap_regions);
public:
G1CollectorPolicy();
@ -710,8 +782,6 @@ public:
// This should be called after the heap is resized.
void record_new_heap_size(size_t new_number_of_regions);
public:
void init();
// Create jstat counters for the policy.
@ -771,8 +841,8 @@ public:
_par_last_ext_root_scan_times_ms[worker_i] = ms;
}
void record_mark_stack_scan_time(int worker_i, double ms) {
_par_last_mark_stack_scan_times_ms[worker_i] = ms;
void record_satb_filtering_time(int worker_i, double ms) {
_par_last_satb_filtering_times_ms[worker_i] = ms;
}
void record_satb_drain_time(double ms) {
@ -894,6 +964,10 @@ public:
// Initialize incremental collection set info.
void start_incremental_cset_building();
// Perform any final calculations on the incremental CSet fields
// before we can use them.
void finalize_incremental_cset_building();
void clear_incremental_cset() {
_inc_cset_head = NULL;
_inc_cset_tail = NULL;
@ -902,10 +976,9 @@ public:
// Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
// Add/remove information about hr to the aggregated information
// for the incrementally built collection set.
// Add information about hr to the aggregated information for the
// incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
void remove_from_incremental_cset_info(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
@ -998,10 +1071,7 @@ public:
}
bool adaptive_young_list_length() {
return _adaptive_young_list_length;
}
void set_adaptive_young_list_length(bool adaptive_young_list_length) {
_adaptive_young_list_length = adaptive_young_list_length;
return _young_gen_sizer->adaptive_young_list_length();
}
inline double get_gc_eff_factor() {
@ -1076,6 +1146,11 @@ public:
_survivor_surv_rate_group->stop_adding_regions();
}
void tenure_all_objects() {
_max_survivor_regions = 0;
_tenuring_threshold = 0;
}
void record_survivor_regions(size_t regions,
HeapRegion* head,
HeapRegion* tail) {

View File

@ -0,0 +1,236 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"
// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
G1CollectedHeap* _g1;
DirtyCardQueue *_dcq;
CardTableModRefBS* _ct_bs;
public:
UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
_g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
assert(_from->is_in_reserved(p), "paranoia");
if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
!_from->is_survivor()) {
size_t card_index = _ct_bs->index_for(p);
if (_ct_bs->mark_card_deferred(card_index)) {
_dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
}
}
}
};
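UpdateRSetDeferred enqueues a card only when mark_card_deferred() reports that this call was the one that marked it, so a card dirtied by many interesting pointers is queued once. A minimal sketch of that mark-then-enqueue deduplication; CardTable, DirtyCardQueue and the 512-byte card shift below are simplified stand-ins, not the CardTableModRefBS / DirtyCardQueue API (the real mark is an atomic update on the card byte):

#include <cstddef>
#include <stdint.h>
#include <vector>

// Simplified stand-ins for the card table and the dirty card queue.
struct CardTable {
  std::vector<bool> deferred;                       // one flag per card
  explicit CardTable(size_t cards) : deferred(cards, false) {}
  size_t index_for(uintptr_t addr) const { return (addr >> 9) % deferred.size(); }
  // Returns true only for the first caller that marks this card deferred.
  bool mark_card_deferred(size_t card) {
    if (deferred[card]) return false;
    deferred[card] = true;
    return true;
  }
};

struct DirtyCardQueue {
  std::vector<size_t> cards;
  void enqueue(size_t card) { cards.push_back(card); }
};

// The UpdateRSetDeferred shape: each interesting card is enqueued at most once.
void defer_rset_update(CardTable& ct, DirtyCardQueue& dcq, uintptr_t field_addr) {
  size_t card = ct.index_for(field_addr);
  if (ct.mark_card_deferred(card)) {
    dcq.enqueue(card);
  }
}

int main() {
  CardTable ct(1024);
  DirtyCardQueue dcq;
  defer_rset_update(ct, dcq, 0x1000);
  defer_rset_update(ct, dcq, 0x1008);   // same 512-byte card: not re-enqueued
  return dcq.cards.size() == 1 ? 0 : 1;
}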
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1;
ConcurrentMark* _cm;
HeapRegion* _hr;
size_t _marked_bytes;
OopsInHeapRegionClosure *_update_rset_cl;
bool _during_initial_mark;
bool _during_conc_mark;
public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
HeapRegion* hr,
OopsInHeapRegionClosure* update_rset_cl,
bool during_initial_mark,
bool during_conc_mark) :
_g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
_update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark) { }
size_t marked_bytes() { return _marked_bytes; }
// <original comment>
// The original idea here was to coalesce evacuated and dead objects.
// However that caused complications with the block offset table (BOT).
// In particular if there were two TLABs, one of them partially refined.
// |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
// The BOT entries of the unrefined part of TLAB_2 point to the start
// of TLAB_2. If the last object of the TLAB_1 and the first object
// of TLAB_2 are coalesced, then the cards of the unrefined part
// would point into middle of the filler object.
// The current approach is to not coalesce and leave the BOT contents intact.
// </original comment>
//
// We now reset the BOT when we start the object iteration over the
// region and refine its entries for every object we come across. So
// the above comment is not really relevant and we should be able
// to coalesce dead objects if we want to.
void do_object(oop obj) {
HeapWord* obj_addr = (HeapWord*) obj;
assert(_hr->is_in(obj_addr), "sanity");
size_t obj_size = obj->size();
_hr->update_bot_for_object(obj_addr, obj_size);
if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
_cm->markPrev(obj);
if (_during_initial_mark) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
// initial-mark (since, normally, we only mark objects pointed
// to by roots if we succeed in copying them). By marking all
// self-forwarded objects we ensure that we mark any that are
// still pointed to by roots. During concurrent marking, and
// after initial-mark, we don't need to mark any objects
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->markNext(obj);
}
_marked_bytes += (obj_size * HeapWordSize);
obj->set_mark(markOopDesc::prototype());
// While we were processing RSet buffers during the collection,
// we actually didn't scan any cards on the collection set,
// since we didn't want to update remembered sets with entries
// that point into the collection set, given that live objects
// from the collection set are about to move and such entries
// will be stale very soon.
// This change also dealt with a reliability issue which
// involved scanning a card in the collection set and coming
// across an array that was being chunked and looking malformed.
// The problem is that, if evacuation fails, we might have
// remembered set entries missing given that we skipped cards on
// the collection set. So, we'll recreate such entries now.
obj->oop_iterate(_update_rset_cl);
assert(_cm->isPrevMarked(obj), "Should be marked!");
} else {
// The object has been either evacuated or is dead. Fill it with a
// dummy object.
MemRegion mr((HeapWord*) obj, obj_size);
CollectedHeap::fill_with_object(mr);
}
}
};
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
OopsInHeapRegionClosure *_update_rset_cl;
public:
RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
OopsInHeapRegionClosure* update_rset_cl) :
_g1h(g1h), _update_rset_cl(update_rset_cl),
_cm(_g1h->concurrent_mark()) { }
bool doHeapRegion(HeapRegion *hr) {
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
bool during_conc_mark = _g1h->mark_in_progress();
assert(!hr->isHumongous(), "sanity");
assert(hr->in_collection_set(), "bad CS");
if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
if (hr->evacuation_failed()) {
RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
during_initial_mark,
during_conc_mark);
MemRegion mr(hr->bottom(), hr->end());
// We'll recreate the prev marking info so we'll first clear
// the prev bitmap range for this region. We never mark any
// CSet objects explicitly so the next bitmap range should be
// cleared anyway.
_cm->clearRangePrevBitmap(mr);
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
// In the common case (i.e. when there is no evacuation
// failure) we make sure that the following is done when
// the region is freed so that it is "ready-to-go" when it's
// re-allocated. However, when evacuation failure happens, a
// region will remain in the heap and might ultimately be added
// to a CSet in the future. So we have to be careful here and
// make sure the region's RSet is ready for parallel iteration
// whenever this might be required in the future.
hr->rem_set()->reset_for_par_iteration();
hr->reset_bot();
_update_rset_cl->set_region(hr);
hr->object_iterate(&rspc);
hr->note_self_forwarding_removal_end(during_initial_mark,
during_conc_mark,
rspc.marked_bytes());
}
}
return false;
}
};
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
public:
G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
AbstractGangTask("G1 Remove Self-forwarding Pointers"),
_g1h(g1h) { }
void work(uint worker_id) {
UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
UpdateRSetDeferred deferred_update(_g1h, &dcq);
OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
if (!G1DeferredRSUpdate) {
update_rset_cl = &immediate_update;
}
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &rsfp_cl);
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
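G1ParRemoveSelfForwardPtrsTask is run by several GC workers that all walk the same collection set from their start regions, and claimHeapRegion(ParEvacFailureClaimValue) is what makes each region be processed exactly once. A minimal standalone sketch of that claim-value idea using an atomic compare-exchange; Region, the claim constants and process_region() are illustrative stand-ins, not the HeapRegion API:

#include <atomic>
#include <cstdio>
#include <vector>

// Stand-in claim values, analogous to HeapRegion::InitialClaimValue and
// HeapRegion::ParEvacFailureClaimValue.
enum ClaimValue { InitialClaim = 0, EvacFailureClaim = 1 };

struct Region {
  std::atomic<int> claim{InitialClaim};
  // Returns true only for the single worker that wins the claim.
  bool claim_region(int claim_value) {
    int expected = InitialClaim;
    return claim.compare_exchange_strong(expected, claim_value);
  }
};

void process_region(Region& r, int worker_id) {
  std::printf("worker %d processes a region\n", worker_id);
}

// Every worker iterates over all regions; the claim makes the work disjoint.
void worker_body(std::vector<Region>& cset, int worker_id) {
  for (size_t i = 0; i < cset.size(); i++) {
    if (cset[i].claim_region(EvacFailureClaim)) {
      process_region(cset[i], worker_id);
    }
  }
}

int main() {
  std::vector<Region> cset(4);
  worker_body(cset, 0);   // with real threads, each region is still
  worker_body(cset, 1);   // claimed (and processed) exactly once
  return 0;
}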

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,17 +121,25 @@ public:
class G1ParCopyHelper : public G1ParClosureSuper {
G1ParScanClosure *_scanner;
protected:
template <class T> void mark_object(T* p);
oop copy_to_survivor_space(oop obj, bool should_mark_root,
bool should_mark_copy);
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that are guaranteed not to move
// during the GC (i.e., non-CSet objects). It is MT-safe.
void mark_object(oop obj);
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that have been forwarded during a
// GC. It is MT-safe.
void mark_forwarded_object(oop from_obj, oop to_obj);
oop copy_to_survivor_space(oop obj);
public:
G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
G1ParScanClosure *scanner) :
G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
};
template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_object>
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
@ -140,9 +148,8 @@ class G1ParCopyClosure : public G1ParCopyHelper {
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
ReferenceProcessor* rp) :
_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state, &_scanner)
{
_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state, &_scanner) {
assert(_ref_processor == NULL, "sanity");
}

View File

@ -558,11 +558,11 @@ void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
}
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) {
uint worker_num, int claim_val) {
ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_par_iterate_chunked(&scrub_cl,
worker_num,
(int) n_workers(),
n_workers(),
claim_val);
}

View File

@ -40,7 +40,7 @@ class G1RemSet: public CHeapObj {
protected:
G1CollectedHeap* _g1;
unsigned _conc_refine_cards;
size_t n_workers();
uint n_workers();
protected:
enum SomePrivateConstants {
@ -122,7 +122,7 @@ public:
// parallel thread id of the current thread, and "claim_val" is the
// value that should be used to claim heap regions.
void scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val);
uint worker_num, int claim_val);
// Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
// join and leave around parts that must be atomic wrt GC. (NULL means

View File

@ -29,7 +29,7 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "oops/oop.inline.hpp"
inline size_t G1RemSet::n_workers() {
inline uint G1RemSet::n_workers() {
if (_g1->workers() != NULL) {
return _g1->workers()->total_workers();
} else {

View File

@ -289,7 +289,15 @@
\
develop(uintx, G1ConcMarkForceOverflow, 0, \
"The number of times we'll force an overflow during " \
"concurrent marking")
"concurrent marking") \
\
develop(uintx, G1DefaultMinNewGenPercent, 20, \
"Percentage (0-100) of the heap size to use as minimum " \
"young gen size.") \
\
develop(uintx, G1DefaultMaxNewGenPercent, 80, \
"Percentage (0-100) of the heap size to use as maximum " \
"young gen size.")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
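The two new develop flags express the young generation bounds as percentages of the heap. A small standalone sketch of that arithmetic, assuming a 4 GB heap and the default 20/80 values (the real sizing logic lives elsewhere in the collector policy):

#include <cstdio>

int main() {
  const unsigned long long heap_bytes = 4ULL << 30;  // assumed 4 GB heap
  const unsigned min_percent = 20;   // G1DefaultMinNewGenPercent default
  const unsigned max_percent = 80;   // G1DefaultMaxNewGenPercent default

  unsigned long long min_young = heap_bytes / 100 * min_percent;
  unsigned long long max_young = heap_bytes / 100 * max_percent;

  std::printf("young gen bounded to [%llu MB, %llu MB]\n",
              min_young >> 20, max_young >> 20);
  return 0;
}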

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,7 +94,8 @@ public:
#endif // PRODUCT
}
template <class T> void do_oop_work(T* p) {
template <class T>
void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
"Precondition");
@ -102,8 +103,10 @@ public:
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
bool failed = false;
if (!_g1h->is_in_closed_subset(obj) ||
_g1h->is_obj_dead_cond(obj, _vo)) {
if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
MutexLockerEx x(ParGCRareEvent_lock,
Mutex::_no_safepoint_check_flag);
if (!_failures) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("----------");
@ -133,6 +136,7 @@ public:
print_object(gclog_or_tty, obj);
}
gclog_or_tty->print_cr("----------");
gclog_or_tty->flush();
_failures = true;
failed = true;
_n_failures++;
@ -155,6 +159,9 @@ public:
cv_field == dirty
: cv_obj == dirty || cv_field == dirty));
if (is_bad) {
MutexLockerEx x(ParGCRareEvent_lock,
Mutex::_no_safepoint_check_flag);
if (!_failures) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("----------");
@ -174,6 +181,7 @@ public:
gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
cv_obj, cv_field);
gclog_or_tty->print_cr("----------");
gclog_or_tty->flush();
_failures = true;
if (!failed) _n_failures++;
}
@ -567,6 +575,40 @@ void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
bool during_conc_mark) {
// We always recreate the prev marking info and we'll explicitly
// mark all objects we find to be self-forwarded on the prev
// bitmap. So all objects need to be below PTAMS.
_prev_top_at_mark_start = top();
_prev_marked_bytes = 0;
if (during_initial_mark) {
// During initial-mark, we'll also explicitly mark all objects
// we find to be self-forwarded on the next bitmap. So all
// objects need to be below NTAMS.
_next_top_at_mark_start = top();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
} else if (during_conc_mark) {
// During concurrent mark, all objects in the CSet (including
// the ones we find to be self-forwarded) are implicitly live.
// So all objects need to be above NTAMS.
_next_top_at_mark_start = bottom();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
}
}
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
bool during_conc_mark,
size_t marked_bytes) {
assert(0 <= marked_bytes && marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
marked_bytes, used()));
_prev_marked_bytes = marked_bytes;
}
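A standalone model of the top-at-mark-start bookkeeping performed above when a region with self-forwarded objects is repaired; the struct and word-sized indices are illustrative only, not the HotSpot HeapRegion:

#include <cassert>
#include <cstddef>

struct TamsModel {
  size_t bottom = 0, top = 100;            // word indices standing in for HeapWords
  size_t prev_tams = 0, next_tams = 0;
  size_t prev_marked_bytes = 0, next_marked_bytes = 0;

  void note_self_forwarding_removal_start(bool during_initial_mark,
                                          bool during_conc_mark) {
    // The prev marking info is recreated: surviving objects must sit below PTAMS.
    prev_tams = top;
    prev_marked_bytes = 0;
    if (during_initial_mark) {
      next_tams = top;     // self-forwarded objects are also marked on the next bitmap
      next_marked_bytes = 0;
    } else if (during_conc_mark) {
      next_tams = bottom;  // CSet objects are implicitly live, so keep them above NTAMS
      next_marked_bytes = 0;
    }
  }

  void note_self_forwarding_removal_end(size_t marked_bytes) {
    assert(marked_bytes <= (top - bottom) * 8);  // HeapWordSize assumed to be 8
    prev_marked_bytes = marked_bytes;
  }
};

int main() {
  TamsModel r;
  r.note_self_forwarding_removal_start(false /* initial mark */, true /* conc mark */);
  r.note_self_forwarding_removal_end(512 /* marked bytes */);
  return 0;
}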
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
ObjectClosure* cl) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -373,7 +373,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
ScrubRemSetClaimValue = 3,
ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5,
CompleteMarkCSetClaimValue = 6
CompleteMarkCSetClaimValue = 6,
ParEvacFailureClaimValue = 7
};
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@ -582,37 +583,33 @@ class HeapRegion: public G1OffsetTableContigSpace {
// that the collector is about to start or has finished (concurrently)
// marking the heap.
// Note the start of a marking phase. Record the
// start of the unmarked area of the region here.
void note_start_of_marking(bool during_initial_mark) {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
if (during_initial_mark && is_young() && !is_survivor())
_next_top_at_mark_start = bottom();
else
_next_top_at_mark_start = top();
}
// Notify the region that concurrent marking is starting. Initialize
// all fields related to the next marking info.
inline void note_start_of_marking();
// Note the end of a marking phase. Install the start of
// the unmarked area that was captured at start of marking.
void note_end_of_marking() {
_prev_top_at_mark_start = _next_top_at_mark_start;
_prev_marked_bytes = _next_marked_bytes;
_next_marked_bytes = 0;
// Notify the region that concurrent marking has finished. Copy the
// (now finalized) next marking info fields into the prev marking
// info fields.
inline void note_end_of_marking();
guarantee(_prev_marked_bytes <=
(size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
"invariant");
}
// Notify the region that it will be used as to-space during a GC
// and we are about to start copying objects into it.
inline void note_start_of_copying(bool during_initial_mark);
// After an evacuation, we need to update _next_top_at_mark_start
// to be the current top. Note this is only valid if we have only
// ever evacuated into this region. If we evacuate, allocate, and
// then evacuate we are in deep doodoo.
void note_end_of_copying() {
assert(top() >= _next_top_at_mark_start, "Increase only");
_next_top_at_mark_start = top();
}
// Notify the region that it ceases being to-space during a GC and
// we will not copy objects into it any more.
inline void note_end_of_copying(bool during_initial_mark);
// Notify the region that we are about to start processing
// self-forwarded objects during evac failure handling.
void note_self_forwarding_removal_start(bool during_initial_mark,
bool during_conc_mark);
// Notify the region that we have finished processing self-forwarded
// objects during evac failure handling.
void note_self_forwarding_removal_end(bool during_initial_mark,
bool during_conc_mark,
size_t marked_bytes);
// Returns "false" iff no object in the region was allocated when the
// last mark phase ended.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,4 +55,71 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
return _offsets.block_start_const(p);
}
inline void HeapRegion::note_start_of_marking() {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
_next_top_at_mark_start = top();
}
inline void HeapRegion::note_end_of_marking() {
_prev_top_at_mark_start = _next_top_at_mark_start;
_prev_marked_bytes = _next_marked_bytes;
_next_marked_bytes = 0;
assert(_prev_marked_bytes <=
(size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
HeapWordSize, "invariant");
}
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
if (during_initial_mark) {
if (is_survivor()) {
assert(false, "should not allocate survivors during IM");
} else {
// During initial-mark we'll explicitly mark any objects on old
// regions that are pointed to by roots. Given that explicit
// marks only make sense under NTAMS it'd be nice if we could
// check that condition if we wanted to. Given that we don't
// know where the top of this region will end up, we simply set
// NTAMS to the end of the region so all marks will be below
// NTAMS. We'll set it to the actual top when we retire this region.
_next_top_at_mark_start = end();
}
} else {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
// We could have re-used this old region as to-space over a
// couple of GCs since the start of the concurrent marking
// cycle. This means that [bottom,NTAMS) will contain objects
// copied up to and including initial-mark and [NTAMS, top)
// will contain objects copied during the concurrent marking cycle.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}
inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
if (during_initial_mark) {
if (is_survivor()) {
assert(false, "should not allocate survivors during IM");
} else {
// See the comment for note_start_of_copying() for the details
// on this.
assert(_next_top_at_mark_start == end(), "pre-condition");
_next_top_at_mark_start = top();
}
} else {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
// See the comment for note_start_of_copying() for the details
// on this.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
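The copying-time NTAMS handling in the two inline methods above can be summarized with a small standalone model of the non-survivor (old region) case: during an initial-mark pause NTAMS is pushed to end() before copying starts and pulled back to the actual top when the region is retired. Names and integer indices are illustrative only.

#include <cassert>

struct ToSpaceModel {
  int bottom = 0, top = 0, end = 1000;  // indices standing in for HeapWords
  int ntams = 0;

  void note_start_of_copying(bool during_initial_mark) {
    if (during_initial_mark) ntams = end;  // all in-pause copies land below NTAMS
  }
  void allocate(int words) { top += words; }
  void note_end_of_copying(bool during_initial_mark) {
    if (during_initial_mark) {
      assert(ntams == end);
      ntams = top;                         // tighten NTAMS to what was actually used
    } else {
      assert(top >= ntams);                // the region may be reused across GCs
    }
  }
};

int main() {
  ToSpaceModel r;
  r.note_start_of_copying(true /* initial mark */);
  r.allocate(200);
  r.note_end_of_copying(true /* initial mark */);
  return 0;
}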

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ public:
// given PtrQueueSet.
PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
// Release any contained resources.
void flush();
virtual void flush();
// Calls flush() when destroyed.
~PtrQueue() { flush(); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,14 @@
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
void ObjPtrQueue::flush() {
// The buffer might contain refs into the CSet. We have to filter it
// first before we flush it, otherwise we might end up with an
// enqueued buffer with refs into the CSet which breaks our invariants.
filter();
PtrQueue::flush();
}
// This method removes entries from an SATB buffer that will not be
// useful to the concurrent marking threads. An entry is removed if it
// satisfies one of the following conditions:
@ -44,38 +52,27 @@
// process it again).
//
// The rest of the entries will be retained and are compacted towards
// the top of the buffer. If with this filtering we clear a large
// enough chunk of the buffer we can re-use it (instead of enqueueing
// it) and we can just allow the mutator to carry on executing.
bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
// A value of 0 means "don't filter SATB buffers".
if (G1SATBBufferEnqueueingThresholdPercent == 0) {
return true;
}
// the top of the buffer. Note that, because we do not allow old
// regions in the CSet during marking, all objects on the CSet regions
// are young (eden or survivors) and therefore implicitly live. So any
// references into the CSet will be removed during filtering.
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(_index == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
void** buf = _buf;
size_t sz = _sz;
if (buf == NULL) {
// nothing to do
return;
}
// Used for sanity checking at the end of the loop.
debug_only(size_t entries = 0; size_t retained = 0;)
size_t i = sz;
size_t new_index = sz;
// Given that we are expecting _index == 0, we could have changed
// the loop condition to (i > 0). But we are using _index for
// generality.
while (i > _index) {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
@ -103,20 +100,56 @@ bool ObjPtrQueue::should_enqueue_buffer() {
debug_only(retained += 1;)
}
}
#ifdef ASSERT
size_t entries_calc = (sz - _index) / oopSize;
assert(entries == entries_calc, "the number of entries we counted "
"should match the number of entries we calculated");
size_t retained_calc = (sz - new_index) / oopSize;
assert(retained == retained_calc, "the number of retained entries we counted "
"should match the number of retained entries we calculated");
size_t perc = retained_calc * 100 / entries_calc;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
_index = new_index;
#endif // ASSERT
_index = new_index;
}
// This method will first apply the above filtering to the buffer. If
// post-filtering a large enough chunk of the buffer has been cleared
// we can re-use the buffer (instead of enqueueing it) and we can just
// allow the mutator to carry on executing using the same buffer
// instead of replacing it.
bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
// Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to
// filter the buffer given that this will remove any references into
// the CSet as we currently assume that no such refs will appear in
// enqueued buffers.
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(_index == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
filter();
size_t sz = _sz;
size_t all_entries = sz / oopSize;
size_t retained_entries = (sz - _index) / oopSize;
size_t perc = retained_entries * 100 / all_entries;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
return should_enqueue;
}
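A standalone sketch of the filter-then-decide flow implemented above: drop entries a predicate deems useless, compact the survivors toward the top of the buffer, and enqueue only if enough entries were retained. The buffer layout (entries in [index, sz)) and the threshold are modelled loosely; this is not the HotSpot ObjPtrQueue.

#include <cstddef>
#include <cstdio>
#include <vector>

struct MiniSatbBuffer {
  std::vector<const void*> buf;  // slots 0..sz-1; live entries occupy [index, sz)
  size_t index = 0;              // index == 0 means the buffer is full

  template <typename KeepFn>
  void filter(KeepFn keep) {
    size_t sz = buf.size();
    size_t new_index = sz;
    for (size_t i = sz; i > index; ) {
      --i;
      const void* entry = buf[i];
      if (entry != nullptr && keep(entry)) {
        --new_index;
        buf[new_index] = entry;  // compact retained entries toward the top
      }
    }
    index = new_index;           // everything below new_index is free space again
  }

  bool should_enqueue(size_t threshold_percent) const {
    size_t all_entries = buf.size();
    size_t retained_entries = all_entries - index;
    return retained_entries * 100 / all_entries > threshold_percent;
  }
};

int main() {
  int dummy = 0;
  MiniSatbBuffer q;
  q.buf.assign(8, &dummy);                       // pretend the buffer is full
  q.filter([](const void*) { return false; });   // a filter that drops everything
  std::printf("enqueue? %s\n", q.should_enqueue(60) ? "yes" : "no");
  return 0;
}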
void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
}
}
void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
_index = _sz;
@ -135,6 +168,21 @@ void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
}
}
#ifndef PRODUCT
// Helpful for debugging
void ObjPtrQueue::print(const char* name) {
print(name, _buf, _index, _sz);
}
void ObjPtrQueue::print(const char* name,
void** buf, size_t index, size_t sz) {
gclog_or_tty->print_cr(" SATB BUFFER [%s] buf: "PTR_FORMAT" "
"index: "SIZE_FORMAT" sz: "SIZE_FORMAT,
name, buf, index, sz);
}
#endif // PRODUCT
#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
if (_buf == NULL) return;
@ -150,12 +198,9 @@ void ObjPtrQueue::verify_oops_in_buffer() {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
SATBMarkQueueSet::SATBMarkQueueSet() :
PtrQueueSet(),
_closure(NULL), _par_closures(NULL),
_shared_satb_queue(this, true /*perm*/)
{}
PtrQueueSet(), _closure(NULL), _par_closures(NULL),
_shared_satb_queue(this, true /*perm*/) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
int process_completed_threshold,
@ -167,7 +212,6 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
}
}
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
@ -228,6 +272,13 @@ void SATBMarkQueueSet::set_active_all_threads(bool b,
}
}
void SATBMarkQueueSet::filter_thread_buffers() {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().filter();
}
shared_satb_queue()->filter();
}
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
_closure = closure;
}
@ -239,9 +290,9 @@ void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
void SATBMarkQueueSet::iterate_closure_all_threads() {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure(_closure);
t->satb_mark_queue().apply_closure_and_empty(_closure);
}
shared_satb_queue()->apply_closure(_closure);
shared_satb_queue()->apply_closure_and_empty(_closure);
}
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
@ -250,7 +301,7 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
if (t->claim_oops_do(true, parity)) {
t->satb_mark_queue().apply_closure(_par_closures[worker]);
t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
}
}
@ -264,7 +315,7 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(true, parity)) {
shared_satb_queue()->apply_closure(_par_closures[worker]);
shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
}
}
@ -292,6 +343,61 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
}
}
void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
nd = nd->next();
}
}
void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure(cl);
}
shared_satb_queue()->apply_closure(cl);
}
#ifndef PRODUCT
// Helpful for debugging
#define SATB_PRINTER_BUFFER_SIZE 256
void SATBMarkQueueSet::print_all(const char* msg) {
char buffer[SATB_PRINTER_BUFFER_SIZE];
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
gclog_or_tty->cr();
gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);
BufferNode* nd = _completed_buffers_head;
int i = 0;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
ObjPtrQueue::print(buffer, buf, 0, _sz);
nd = nd->next();
i += 1;
}
for (JavaThread* t = Threads::first(); t; t = t->next()) {
jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
t->satb_mark_queue().print(buffer);
}
shared_satb_queue()->print("Shared");
gclog_or_tty->cr();
}
#endif // PRODUCT
void SATBMarkQueueSet::abandon_partial_marking() {
BufferNode* buffers_to_delete = NULL;
{
@ -316,5 +422,5 @@ void SATBMarkQueueSet::abandon_partial_marking() {
for (JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().reset();
}
shared_satb_queue()->reset();
shared_satb_queue()->reset();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,9 +29,26 @@
class ObjectClosure;
class JavaThread;
class SATBMarkQueueSet;
// A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue {
friend class SATBMarkQueueSet;
private:
// Filter out unwanted entries from the buffer.
void filter();
// Apply the closure to all elements.
void apply_closure(ObjectClosure* cl);
// Apply the closure to all elements and empty the buffer.
void apply_closure_and_empty(ObjectClosure* cl);
// Apply the closure to all elements of "buf", down to "index" (inclusive.)
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);
public:
ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
// SATB queues are only active during marking cycles. We create
@ -41,23 +58,23 @@ public:
// field to true. This is done in JavaThread::initialize_queues().
PtrQueue(qset, perm, false /* active */) { }
// Overrides PtrQueue::flush() so that it can filter the buffer
// before it is flushed.
virtual void flush();
// Overrides PtrQueue::should_enqueue_buffer(). See the method's
// definition for more information.
virtual bool should_enqueue_buffer();
// Apply the closure to all elements, and reset the index to make the
// buffer empty.
void apply_closure(ObjectClosure* cl);
// Apply the closure to all elements of "buf", down to "index" (inclusive.)
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);
#ifndef PRODUCT
// Helpful for debugging
void print(const char* name);
static void print(const char* name, void** buf, size_t index, size_t sz);
#endif // PRODUCT
void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
class SATBMarkQueueSet: public PtrQueueSet {
ObjectClosure* _closure;
ObjectClosure** _par_closures; // One per ParGCThread.
@ -88,6 +105,9 @@ public:
// set itself, has an active value same as expected_active.
void set_active_all_threads(bool b, bool expected_active);
// Filter all the currently-active SATB buffers.
void filter_thread_buffers();
// Register "blk" as "the closure" for all queues. Only one such closure
// is allowed. The "apply_closure_to_completed_buffer" method will apply
// this closure to a completed buffer, and "iterate_closure_all_threads"
@ -98,10 +118,9 @@ public:
// closures, one for each parallel GC thread.
void set_par_closure(int i, ObjectClosure* closure);
// If there is a registered closure for buffers, apply it to all entries
// in all currently-active buffers. This should only be applied at a
// safepoint. (Currently must not be called in parallel; this should
// change in the future.)
// Apply the registered closure to all entries on each
// currently-active buffer and then empty the buffer. It should only
// be called serially and at a safepoint.
void iterate_closure_all_threads();
// Parallel version of the above.
void par_iterate_closure_all_threads(int worker);
@ -117,11 +136,21 @@ public:
return apply_closure_to_completed_buffer_work(true, worker);
}
// Apply the given closure on enqueued and currently-active buffers
// respectively. Both methods are read-only, i.e., they do not
// modify any of the buffers.
void iterate_completed_buffers_read_only(ObjectClosure* cl);
void iterate_thread_buffers_read_only(ObjectClosure* cl);
#ifndef PRODUCT
// Helpful for debugging
void print_all(const char* msg);
#endif // PRODUCT
ObjPtrQueue* shared_satb_queue() { return &_shared_satb_queue; }
// If a marking is being abandoned, reset any unprocessed log buffers.
void abandon_partial_marking();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP

View File

@ -56,14 +56,14 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
int n_strides = n_threads * ParGCStridesPerThread;
uint n_strides = n_threads * ParGCStridesPerThread;
SequentialSubTasksDone* pst = sp->par_seq_tasks();
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
pst->set_n_threads(n_threads);
pst->set_n_tasks(n_strides);
int stride = 0;
uint stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, cl, ct,
lowest_non_clean,
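A standalone sketch of the stride-claiming pattern above: n_strides subtasks are handed out once each from a shared counter, and a claimed stride covers every n_strides-th chunk of the dirty-card range. Only the claiming shape is modelled; the real card work lives in process_stride() and SequentialSubTasksDone.

#include <atomic>
#include <cstdio>

struct MiniSubTasks {
  std::atomic<unsigned> next{0};
  unsigned n_tasks = 0;
  // Hands out task ids 0..n_tasks-1 exactly once; returns true when none are left,
  // matching the way the loop above exits once every stride is claimed.
  bool is_task_claimed(unsigned& t) {
    t = next.fetch_add(1);
    return t >= n_tasks;
  }
};

int main() {
  const unsigned n_threads = 4, strides_per_thread = 2;
  const unsigned n_strides = n_threads * strides_per_thread;
  const unsigned n_chunks = 32;        // pretend the card range splits into 32 chunks

  MiniSubTasks pst;
  pst.n_tasks = n_strides;

  unsigned stride = 0;
  while (!pst.is_task_claimed(stride)) {
    for (unsigned chunk = stride; chunk < n_chunks; chunk += n_strides) {
      std::printf("stride %u scans chunk %u\n", stride, chunk);
    }
  }
  return 0;
}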

View File

@ -590,7 +590,7 @@ void ParNewGenTask::set_for_termination(int active_workers) {
// called after a task is started. So "i" is based on
// first-come-first-served.
void ParNewGenTask::work(int i) {
void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource
// and handle marks.
@ -601,8 +601,8 @@ void ParNewGenTask::work(int i) {
Generation* old_gen = gch->next_gen(_gen);
ParScanThreadState& par_scan_state = _state_set->thread_state(i);
assert(_state_set->is_valid(i), "Should not have been called");
ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
assert(_state_set->is_valid(worker_id), "Should not have been called");
par_scan_state.set_young_old_boundary(_young_old_boundary);
@ -755,7 +755,7 @@ public:
ParScanThreadStateSet& state_set);
private:
virtual void work(int i);
virtual void work(uint worker_id);
virtual void set_for_termination(int active_workers) {
_state_set.terminator()->reset_for_reuse(active_workers);
}
@ -781,13 +781,13 @@ ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
{
}
void ParNewRefProcTaskProxy::work(int i)
void ParNewRefProcTaskProxy::work(uint worker_id)
{
ResourceMark rm;
HandleMark hm;
ParScanThreadState& par_scan_state = _state_set.thread_state(i);
ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
par_scan_state.set_young_old_boundary(_young_old_boundary);
_task.work(i, par_scan_state.is_alive_closure(),
_task.work(worker_id, par_scan_state.is_alive_closure(),
par_scan_state.keep_alive_closure(),
par_scan_state.evacuate_followers_closure());
}
@ -802,9 +802,9 @@ public:
_task(task)
{ }
virtual void work(int i)
virtual void work(uint worker_id)
{
_task.work(i);
_task.work(worker_id);
}
};

View File

@ -239,7 +239,7 @@ public:
HeapWord* young_old_boundary() { return _young_old_boundary; }
void work(int i);
void work(uint worker_id);
// Reset the terminator in ParScanThreadStateSet for
// "active_workers" threads.

View File

@ -282,7 +282,7 @@ void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
// large page can be broken down if we require small pages.
os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
// Then we uncommit the pages in the range.
os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
// And make them local/first-touch biased.
os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
}
@ -297,7 +297,7 @@ void MutableNUMASpace::free_region(MemRegion mr) {
assert((intptr_t)aligned_region.start() % page_size() == 0 &&
(intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
assert(region().contains(aligned_region), "Sanity");
os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
}
}
@ -954,7 +954,7 @@ void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count
if (e != scan_end) {
if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
&& page_expected.size != 0) {
os::free_memory(s, pointer_delta(e, s, sizeof(char)));
os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
}
page_expected = page_found;
}

View File

@ -51,7 +51,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
size_t size = pointer_delta(end, start, sizeof(char));
if (clear_space) {
// Prefer page reallocation to migration.
os::free_memory((char*)start, size);
os::free_memory((char*)start, size, page_size);
}
os::numa_make_global((char*)start, size);
}

View File

@ -478,18 +478,22 @@ oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle r
void CollectedHeap::test_is_in() {
CollectedHeap* heap = Universe::heap();
uintptr_t epsilon = (uintptr_t) MinObjAlignment;
uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
uintptr_t heap_end = (uintptr_t) heap->_reserved.end();
// Test that NULL is not in the heap.
assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");
// Test that a pointer to before the heap start is reported as outside the heap.
assert(heap->_reserved.start() >= (void*)MinObjAlignment, "sanity");
void* before_heap = (void*)((intptr_t)heap->_reserved.start() - MinObjAlignment);
assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
void* before_heap = (void*)(heap_start - epsilon);
assert(!heap->is_in(before_heap),
err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));
// Test that a pointer to after the heap end is reported as outside the heap.
assert(heap->_reserved.end() <= (void*)(uintptr_t(-1) - (uint)MinObjAlignment), "sanity");
void* after_heap = (void*)((intptr_t)heap->_reserved.end() + MinObjAlignment);
assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
void* after_heap = (void*)(heap_end + epsilon);
assert(!heap->is_in(after_heap),
err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
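The epsilon-based boundary checks in test_is_in() can be exercised against any reserved range. A standalone sketch, using a malloc'd block as a stand-in for the heap's reserved MemRegion and a fixed epsilon in place of MinObjAlignment:

#include <cassert>
#include <cstdint>
#include <cstdlib>

static bool is_in(uintptr_t start, uintptr_t end, const void* p) {
  uintptr_t u = reinterpret_cast<uintptr_t>(p);
  return start <= u && u < end;
}

int main() {
  const size_t bytes = 4096;
  void* block = std::malloc(bytes);
  const uintptr_t start = reinterpret_cast<uintptr_t>(block);
  const uintptr_t end = start + bytes;
  const uintptr_t epsilon = 8;  // stands in for MinObjAlignment

  assert(!is_in(start, end, nullptr));                                   // NULL is outside
  assert(!is_in(start, end, reinterpret_cast<void*>(start - epsilon)));  // just before
  assert(!is_in(start, end, reinterpret_cast<void*>(end + epsilon)));    // just after

  std::free(block);
  return 0;
}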

View File

@ -69,7 +69,7 @@ class CollectedHeap : public CHeapObj {
MemRegion _reserved;
BarrierSet* _barrier_set;
bool _is_gc_active;
int _n_par_threads;
uint _n_par_threads;
unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
@ -309,10 +309,10 @@ class CollectedHeap : public CHeapObj {
GCCause::Cause gc_cause() { return _gc_cause; }
// Number of threads currently working on GC tasks.
int n_par_threads() { return _n_par_threads; }
uint n_par_threads() { return _n_par_threads; }
// May be overridden to set additional parallelism.
virtual void set_par_threads(int t) { _n_par_threads = t; };
virtual void set_par_threads(uint t) { _n_par_threads = t; };
// Preload classes into the shared portion of the heap, and then dump
// that data to a file so that it can be loaded directly by another

View File

@ -1402,7 +1402,7 @@ class LinkClassesClosure : public ObjectClosure {
instanceKlass* ik = (instanceKlass*) k;
// Link the class to cause the bytecodes to be rewritten and the
// cpcache to be created.
if (ik->get_init_state() < instanceKlass::linked) {
if (ik->init_state() < instanceKlass::linked) {
ik->link_class(THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
}
@ -1535,7 +1535,7 @@ void GenCollectedHeap::preload_and_dump(TRAPS) {
// are loaded in order that the related data structures (klass,
// cpCache, String constants) are located together.
if (ik->get_init_state() < instanceKlass::linked) {
if (ik->init_state() < instanceKlass::linked) {
ik->link_class(THREAD);
guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
}

Some files were not shown because too many files have changed in this diff.