Merge
This commit is contained in: commit 958938e6a4
@@ -95,7 +95,9 @@ static task_t getTask(JNIEnv *env, jobject this_obj) {
 #define CHECK_EXCEPTION_CLEAR_(value) if ((*env)->ExceptionOccurred(env)) { (*env)->ExceptionClear(env); return value; }
 
 static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
-  (*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
+  jclass exceptionClass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
+  CHECK_EXCEPTION;
+  (*env)->ThrowNew(env, exceptionClass, errMsg);
 }
 
 static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {

@@ -129,6 +131,7 @@ static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
 JNIEXPORT void JNICALL
 Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_init0(JNIEnv *env, jclass cls) {
   symbolicatorID = (*env)->GetFieldID(env, cls, "symbolicator", "J");
+  CHECK_EXCEPTION;
   taskID = (*env)->GetFieldID(env, cls, "task", "J");
   CHECK_EXCEPTION;
 

@@ -236,13 +239,16 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_loo
 (JNIEnv *env, jobject this_obj, jlong addr) {
   uintptr_t offset;
   const char* sym = NULL;
+  jstring sym_string;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
   if (ph != NULL && ph->core != NULL) {
     sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
     if (sym == NULL) return 0;
+    sym_string = (*env)->NewStringUTF(env, sym);
+    CHECK_EXCEPTION_(0);
     return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
-                                    (*env)->NewStringUTF(env, sym), (jlong)offset);
+                                    sym_string, (jlong)offset);
   }
   return 0;
 }
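The three hunks above all apply the same JNI rule: never make another JNI call while an exception may be pending, so each FindClass/GetFieldID/NewStringUTF result is now checked before it is used in a follow-up call. A minimal standalone sketch of the idiom, using the C++ flavor of the JNI API; the helper and parameter names here are hypothetical, not from this patch:

    #include <jni.h>

    // Sketch of the pattern enforced by the CHECK_EXCEPTION macros: materialize
    // the jstring first, test for a pending exception, only then call back in.
    static jobject lookup_symbol_sketch(JNIEnv* env, jobject self,
                                        jmethodID create_symbol,
                                        const char* sym, jlong offset) {
      jstring sym_string = env->NewStringUTF(sym);  // may throw OutOfMemoryError
      if (env->ExceptionOccurred() != nullptr) {
        return nullptr;  // leave the exception pending for the caller
      }
      return env->CallObjectMethod(self, create_symbol, sym_string, offset);
    }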
@@ -749,11 +755,14 @@ static void fillLoadObjects(JNIEnv* env, jobject this_obj, struct ps_prochandle*
   const char* name;
   jobject loadObject;
   jobject loadObjectList;
+  jstring nameString;
 
   base = get_lib_base(ph, i);
   name = get_lib_name(ph, i);
+  nameString = (*env)->NewStringUTF(env, name);
+  CHECK_EXCEPTION;
   loadObject = (*env)->CallObjectMethod(env, this_obj, createLoadObject_ID,
-                                        (*env)->NewStringUTF(env, name), (jlong)0, (jlong)base);
+                                        nameString, (jlong)0, (jlong)base);
   CHECK_EXCEPTION;
   loadObjectList = (*env)->GetObjectField(env, this_obj, loadObjectList_ID);
   CHECK_EXCEPTION;
@@ -287,8 +287,43 @@ else
        @$(ECHO) "Error: trying to build a minimal target but JVM_VARIANT_MINIMAL1 is not true."
 endif
 
+remove_old_debuginfo:
+ifeq ($(JVM_VARIANT_CLIENT), true)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifeq ($(OSNAME), windows)
+        $(RM) -f $(EXPORT_CLIENT_DIR)/jvm.map $(EXPORT_CLIENT_DIR)/jvm.pdb
+    else
+        $(RM) -f $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+    endif
+  else
+        $(RM) -f $(EXPORT_CLIENT_DIR)/libjvm.diz
+  endif
+endif
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifeq ($(OSNAME), windows)
+        $(RM) -f $(EXPORT_SERVER_DIR)/jvm.map $(EXPORT_SERVER_DIR)/jvm.pdb
+    else
+      ifeq ($(OS_VENDOR), Darwin)
+        $(RM) -rf $(EXPORT_SERVER_DIR)/libjvm.dylib.dSYM
+      else
+        $(RM) -f $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+      endif
+    endif
+  else
+        $(RM) -f $(EXPORT_SERVER_DIR)/libjvm.diz
+  endif
+endif
+ifeq ($(JVM_VARIANT_MINIMAL1),true)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        $(RM) -f $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
+  else
+        $(RM) -f $(EXPORT_MINIMAL_DIR)/libjvm.diz
+  endif
+endif
+
 # Export file rule
-generic_export: $(EXPORT_LIST)
+generic_export: $(EXPORT_LIST) remove_old_debuginfo
 
 export_product:
        $(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export

@@ -841,4 +876,4 @@ include $(GAMMADIR)/make/jprt.gmk
        export_jdk_product export_jdk_fastdebug export_jdk_debug \
        create_jdk copy_jdk update_jdk test_jdk \
        copy_product_jdk copy_fastdebug_jdk copy_debug_jdk \
-       $(HS_ALT_MAKE)/Makefile.make
+       $(HS_ALT_MAKE)/Makefile.make remove_old_debuginfo
@@ -101,7 +101,7 @@ CXXFLAGS = \
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
 # a time and date.
-vm_version.o: CXXFLAGS += ${JRE_VERSION}
+CXXFLAGS/vm_version.o += ${JRE_VERSION}
 
 CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
 
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -37,6 +36,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"

@@ -384,10 +384,10 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
   bool load_xa = (xa != 0) || (xb < 0);
   bool return_xd = false;
 
-  if (load_xa) lis(tmp, xa);
-  if (xc) lis(d, xc);
+  if (load_xa) { lis(tmp, xa); }
+  if (xc) { lis(d, xc); }
   if (load_xa) {
-    if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
+    if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
   } else {
     li(tmp, xb); // non-negative
   }

@@ -409,18 +409,18 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
     // opt 4: avoid adding 0
     if (xa) { // Highest 16-bit needed?
       lis(d, xa);
-      if (xb) addi(d, d, xb);
+      if (xb) { addi(d, d, xb); }
     } else {
       li(d, xb);
     }
     sldi(d, d, 32);
-    if (xc) addis(d, d, xc);
+    if (xc) { addis(d, d, xc); }
   }
 
   // opt 5: Return offset to be inserted into following instruction.
   if (return_simm16_rest) return xd;
 
-  if (xd) addi(d, d, xd);
+  if (xd) { addi(d, d, xd); }
   return 0;
 }
 

@@ -696,4 +696,5 @@ void Assembler::test_asm() {
   tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
   code()->decode();
 }
+
 #endif // !PRODUCT
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -139,7 +139,8 @@ inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16) { A
 inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
 inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
 
-inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
+inline void Assembler::isel(Register d, Register a, Register b, int c) { guarantee(VM_Version::has_isel(), "opcode not supported on this hardware");
+                                                                         emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
 
 // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
 inline void Assembler::andi_( Register a, Register s, int ui16) { emit_int32(ANDI_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }

@@ -531,9 +532,12 @@ inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_
 //inline void Assembler::mffgpr( FloatRegister d, Register b) { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
 //inline void Assembler::mftgpr( Register d, FloatRegister b) { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
 // add cmpb and popcntb to detect ppc power version.
-inline void Assembler::cmpb(   Register a, Register s, Register b) { emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
-inline void Assembler::popcntb(Register a, Register s) { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
-inline void Assembler::popcntw(Register a, Register s) { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
+inline void Assembler::cmpb(   Register a, Register s, Register b) { guarantee(VM_Version::has_cmpb(), "opcode not supported on this hardware");
+                                                                     emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::popcntb(Register a, Register s) { guarantee(VM_Version::has_popcntb(), "opcode not supported on this hardware");
+                                                         emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
+inline void Assembler::popcntw(Register a, Register s) { guarantee(VM_Version::has_popcntw(), "opcode not supported on this hardware");
+                                                         emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
 inline void Assembler::popcntd(Register a, Register s) { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); };
 
 inline void Assembler::fneg( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE | frt(d) | frb(b) | rc(0)); }

@@ -568,14 +572,17 @@ inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FC
 inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE | frt(d) | frb(b) | rc(0)); }
 inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
 inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE | frt(d) | frb(b) | rc(0)); }
-inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fcfids(), "opcode not supported on this hardware");
+                                                                  emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
 
 // PPC 1, section 4.6.7 Floating-Point Compare Instructions
 inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
 
 // PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
-inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
-inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrt(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrts(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
 
 // Vector instructions for >= Power6.
 inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }

@@ -703,7 +710,8 @@ inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegist
 inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
 inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
 inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
-inline void Assembler::vand(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vand(  VectorRegister d, VectorRegister a, VectorRegister b) { guarantee(VM_Version::has_vand(), "opcode not supported on this hardware");
+                                                                                      emit_int32( VAND_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vandc( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vnor(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vor(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
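A pattern repeated throughout the hunks above: every emitter for an optional PPC opcode now opens with a guarantee() that CPU feature detection reported the instruction, so using it on older hardware fails fast with a VM error instead of a SIGILL at some arbitrary later point. A self-contained sketch of the idea; the guarantee() helper, the feature flag, and the encoding constant below are illustrative stand-ins, not the HotSpot definitions:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    static bool cpu_has_popcntw = false;  // would be filled in by feature detection

    static void guarantee(bool cond, const char* msg) {
      if (!cond) { std::fprintf(stderr, "guarantee failed: %s\n", msg); std::abort(); }
    }

    // Refuse to encode the instruction unless the CPU reported support for it.
    static uint32_t emit_popcntw(int ra, int rs) {
      guarantee(cpu_has_popcntw, "opcode not supported on this hardware");
      const uint32_t POPCNTW_OPCODE = (31u << 26) | (378u << 1);  // illustrative encoding
      return POPCNTW_OPCODE | ((uint32_t)rs << 21) | ((uint32_t)ra << 16);
    }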
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -87,7 +87,7 @@ define_pd_global(uint64_t,MaxRAM, 4ULL*G);
 define_pd_global(uintx, CodeCacheMinBlockLength, 4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
-define_pd_global(bool, TrapBasedRangeChecks, false);
+define_pd_global(bool, TrapBasedRangeChecks, true);
 
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -24,8 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "asm/assembler.inline.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
@@ -1120,7 +1118,7 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
   }
   return _last_calls_return_pc;
 }
-#endif
+#endif // ABI_ELFv2
 
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register last_java_sp,

@@ -1794,7 +1792,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
   bne(cr_reg, cas_label);
 
-  load_klass_with_trap_null_check(temp_reg, obj_reg);
+  load_klass(temp_reg, obj_reg);
 
   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);

@@ -1891,7 +1889,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
   // the bias from one thread to another directly in this situation.
   andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
   orr(temp_reg, R16_thread, temp_reg);
-  load_klass_with_trap_null_check(temp2_reg, obj_reg);
+  load_klass(temp2_reg, obj_reg);
   ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
   orr(temp_reg, temp_reg, temp2_reg);
 

@@ -1927,7 +1925,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
   // that another thread raced us for the privilege of revoking the
   // bias of this particular object, so it's okay to continue in the
   // normal locking code.
-  load_klass_with_trap_null_check(temp_reg, obj_reg);
+  load_klass(temp_reg, obj_reg);
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
   andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
   orr(temp_reg, temp_reg, temp2_reg);
@@ -2213,8 +2211,7 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Regis
   stbx(R0, Rtmp, Robj);
 }
 
-#ifndef SERIALGC
-
+#if INCLUDE_ALL_GCS
 // General G1 pre-barrier generator.
 // Goal: record the previous value if it is not null.
 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,

@@ -2328,14 +2325,17 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
 
   // Get the address of the card.
   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
-  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
-  cmpwi(CCR0, Rtmp3 /* card value */, 0);
+  cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  beq(CCR0, filtered);
+
+  membar(Assembler::StoreLoad);
+  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
+  cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
   beq(CCR0, filtered);
 
   // Storing a region crossing, non-NULL oop, card is clean.
   // Dirty card and log.
-  li(Rtmp3, 0); // dirty
+  li(Rtmp3, CardTableModRefBS::dirty_card_val());
   //release(); // G1: oops are allowed to get visible after dirty marking.
   stbx(Rtmp3, Rbase, Rcard_addr);
 

@@ -2362,7 +2362,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
 
   bind(filtered_int);
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
 // in frame_ppc64.hpp.
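The reworked post-barrier adds two things: an early exit when the card belongs to the young generation, and a StoreLoad barrier followed by a re-read of the card before testing whether it is already dirty. In plain C++ the emitted logic corresponds roughly to the sketch below; the card values are assumptions for illustration, the real constants come from the card-table classes:

    #include <atomic>
    #include <cstdint>

    constexpr uint8_t kYoungCard = 2;  // assumption: G1's g1_young_card_val()
    constexpr uint8_t kDirtyCard = 0;  // assumption: dirty_card_val()

    // Rough C++ equivalent of the emitted post-barrier filtering logic.
    void g1_post_barrier_sketch(volatile uint8_t* card) {
      if (*card == kYoungCard) return;  // store was into the young gen: nothing to do
      std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
      if (*card == kDirtyCard) return;  // another thread already dirtied it
      *card = kDirtyCard;               // dirty the card (enqueue for logging omitted)
    }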
@@ -2453,7 +2453,8 @@ void MacroAssembler::get_vm_result_2(Register metadata_result) {
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
   if (Universe::narrow_klass_base() != 0) {
-    load_const(R0, Universe::narrow_klass_base(), (dst != current) ? dst : noreg); // Use dst as temp if it is free.
+    // Use dst as temp if it is free.
+    load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
     sub(dst, current, R0);
     current = dst;
   }
@@ -514,14 +514,14 @@ class MacroAssembler: public Assembler {
   void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
   void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // General G1 pre-barrier generator.
   void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                             Register Rtmp1, Register Rtmp2, bool needs_frame = false);
   // General G1 post-barrier generator
   void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                              Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
-#endif // SERIALGC
+#endif
 
   // Support for managing the JavaThread pointer (i.e.; the reference to
   // thread-local information).
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -119,6 +119,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
 
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
+  Label L_no_such_method;
   assert(method == R19_method, "interpreter calling convention");
   assert_different_registers(method, target, temp);
 

@@ -131,17 +132,31 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
     __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
     __ cmplwi(CCR0, temp, 0);
     __ beq(CCR0, run_compiled_code);
+    // Null method test is replicated below in compiled case,
+    // it might be able to address across the verify_thread()
+    __ cmplwi(CCR0, R19_method, 0);
+    __ beq(CCR0, L_no_such_method);
     __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
     __ mtctr(target);
     __ bctr();
     __ BIND(run_compiled_code);
   }
 
+  // Compiled case, either static or fall-through from runtime conditional
+  __ cmplwi(CCR0, R19_method, 0);
+  __ beq(CCR0, L_no_such_method);
+
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ ld(target, in_bytes(entry_offset), R19_method);
   __ mtctr(target);
   __ bctr();
+
+  __ bind(L_no_such_method);
+  assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
+  __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
+  __ mtctr(target);
+  __ bctr();
 }
 
@@ -891,6 +891,13 @@ definitions %{
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description.
 source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
 // Returns true if Node n is followed by a MemBar node that
 // will do an acquire. If so, this node must not do the acquire
 // operation.
@@ -1114,6 +1121,40 @@ static inline void emit_long(CodeBuffer &cbuf, int value) {
 
 //=============================================================================
 
+%} // interrupt source
+
+source_hpp %{ // Header information of the source block.
+
+//--------------------------------------------------------------
+//---< Used for optimization in Compile::Shorten_branches >---
+//--------------------------------------------------------------
+
+const uint trampoline_stub_size = 6 * BytesPerInstWord;
+
+class CallStubImpl {
+
+ public:
+
+  static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
+
+  // Size of call trampoline stub.
+  // This doesn't need to be accurate to the byte, but it
+  // must be larger than or equal to the real size of the stub.
+  static uint size_call_trampoline() {
+    return trampoline_stub_size;
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 5;
+  }
+
+};
+
+%} // end source_hpp
+
+source %{
+
 // Emit a trampoline stub for a call to a target which is too far away.
 //
 // code sequences:
@@ -1125,9 +1166,7 @@ static inline void emit_long(CodeBuffer &cbuf, int value) {
 // load the call target from the constant pool
 // branch via CTR (LR/link still points to the call-site above)
 
-const uint trampoline_stub_size = 6 * BytesPerInstWord;
-
-void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
+void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
   // Start the stub.
   address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
   if (stub == NULL) {

@@ -1170,19 +1209,6 @@ void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int
   __ end_a_stub();
 }
 
-// Size of trampoline stub, this doesn't need to be accurate but it must
-// be larger or equal to the real size of the stub.
-// Used for optimization in Compile::Shorten_branches.
-uint size_call_trampoline() {
-  return trampoline_stub_size;
-}
-
-// Number of relocation entries needed by trampoline stub.
-// Used for optimization in Compile::Shorten_branches.
-uint reloc_call_trampoline() {
-  return 5;
-}
-
 //=============================================================================
 
 // Emit an inline branch-and-link call and a related trampoline stub.
@@ -1221,7 +1247,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
   const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
   // Emit the trampoline stub which will be related to the branch-and-link below.
-  emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+  CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
   __ relocate(rtype);
 }
 
@@ -2023,17 +2049,34 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
 
 //=============================================================================
 
-uint size_exception_handler() {
-  // The exception_handler is a b64_patchable.
-  return MacroAssembler::b64_patchable_size;
-}
-
-uint size_deopt_handler() {
-  // The deopt_handler is a bl64_patchable.
-  return MacroAssembler::bl64_patchable_size;
-}
-
-int emit_exception_handler(CodeBuffer &cbuf) {
+%} // interrupt source
+
+source_hpp %{ // Header information of the source block.
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // The exception_handler is a b64_patchable.
+    return MacroAssembler::b64_patchable_size;
+  }
+
+  static uint size_deopt_handler() {
+    // The deopt_handler is a bl64_patchable.
+    return MacroAssembler::bl64_patchable_size;
+  }
+
+};
+
+%} // end source_hpp
+
+source %{
+
+int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
   MacroAssembler _masm(&cbuf);
 
   address base = __ start_a_stub(size_exception_handler());
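Together with the CallStubImpl hunk earlier, this change moves what were free functions (size_exception_handler(), emit_deopt_handler(), ...) into a class of static members, so callers such as generate_deopt_blob() can reach them through a shared header instead of per-file extern declarations; the sharedRuntime hunk further down drops exactly such an extern. A compact sketch of the refactor, with made-up handler sizes for illustration:

    #include <cstdio>

    // Before: free functions, each user needed its own 'extern' declaration.
    // After: static members grouped in one class visible through a header.
    class HandlerImpl {
     public:
      static unsigned size_exception_handler() { return 16; }  // illustrative size
      static unsigned size_deopt_handler()     { return 20; }  // illustrative size
    };

    int main() {
      // e.g. a deopt blob generator adjusting the return pc by the handler size:
      const int return_pc_adjustment = -(int)HandlerImpl::size_deopt_handler();
      std::printf("%d\n", return_pc_adjustment);
      return 0;
    }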
@@ -2050,7 +2093,7 @@ int emit_exception_handler(CodeBuffer &cbuf) {
 
 // The deopt_handler is like the exception handler, but it calls to
 // the deoptimization blob instead of jumping to the exception blob.
-int emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
   MacroAssembler _masm(&cbuf);
 
   address base = __ start_a_stub(size_deopt_handler());
@@ -3438,7 +3481,7 @@ encode %{
   const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
   // Emit the trampoline stub which will be related to the branch-and-link below.
-  emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+  CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
   __ relocate(_optimized_virtual ?
               relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
 }

@@ -3481,7 +3524,7 @@ encode %{
   const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
   // Emit the trampoline stub which will be related to the branch-and-link below.
-  emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+  CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
   assert(_optimized_virtual, "methodHandle call should be a virtual call");
   __ relocate(relocInfo::opt_virtual_call_type);
 }

@@ -3531,7 +3574,7 @@ encode %{
   const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
   const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
   const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
-  emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
+  CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
 
   if (ra_->C->env()->failing())
     return;
@@ -8755,6 +8798,7 @@ instruct sqrtD_reg(regD dst, regD src) %{
 // Single-precision sqrt.
 instruct sqrtF_reg(regF dst, regF src) %{
   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
+  predicate(VM_Version::has_fsqrts());
   ins_cost(DEFAULT_COST);
 
   format %{ "FSQRTS $dst, $src" %}
@@ -11550,8 +11594,7 @@ instruct safePoint_poll_conPollAddr(rscratch2RegP poll) %{
   // effect no longer needs to be mentioned, since r0 is not contained
   // in a reg_class.
 
-  format %{ "LD R12, addr of polling page\n\t"
-            "LD R0, #0, R12 \t// Safepoint poll for GC" %}
+  format %{ "LD R0, #0, R12 \t// Safepoint poll for GC" %}
   ins_encode( enc_poll(0x0, poll) );
   ins_pipe(pipe_class_default);
 %}
@@ -34,6 +34,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_ppc.inline.hpp"
+#include "adfiles/ad_ppc_64.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif

@@ -52,10 +53,6 @@
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
 
-// Used by generate_deopt_blob. Defined in .ad file.
-extern uint size_deopt_handler();
-
-
 class RegisterSaver {
  // Used for saving volatile registers.
  public:
@@ -2782,7 +2779,7 @@ void SharedRuntime::generate_deopt_blob() {
   // We can't grab a free register here, because all registers may
   // contain live values, so let the RegisterSaver do the adjustment
   // of the return pc.
-  const int return_pc_adjustment_no_exception = -size_deopt_handler();
+  const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
 
   // Push the "unpack frame"
   // Save everything in sight.
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -23,17 +23,6 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_aix
-# include "thread_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -402,6 +402,9 @@ void VM_Version::determine_features() {
   CodeBuffer cb("detect_cpu_features", code_size, 0);
   MacroAssembler* a = new MacroAssembler(&cb);
 
+  // Must be set to true so we can generate the test code.
+  _features = VM_Version::all_features_m;
+
   // Emit code.
   void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
   uint32_t *code = (uint32_t *)a->pc();

@@ -409,14 +412,15 @@ void VM_Version::determine_features() {
   // Keep R3_ARG1 unmodified, it contains &field (see below).
   // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
   a->fsqrt(F3, F4);                            // code[0] -> fsqrt_m
-  a->isel(R7, R5, R6, 0);                      // code[1] -> isel_m
-  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[2] -> lxarx_m
-  a->cmpb(R7, R5, R6);                         // code[3] -> bcmp
-  //a->mftgpr(R7, F3);                         // code[4] -> mftgpr
-  a->popcntb(R7, R5);                          // code[5] -> popcntb
-  a->popcntw(R7, R5);                          // code[6] -> popcntw
-  a->fcfids(F3, F4);                           // code[7] -> fcfids
-  a->vand(VR0, VR0, VR0);                      // code[8] -> vand
+  a->fsqrts(F3, F4);                           // code[1] -> fsqrts_m
+  a->isel(R7, R5, R6, 0);                      // code[2] -> isel_m
+  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
+  a->cmpb(R7, R5, R6);                         // code[4] -> bcmp
+  //a->mftgpr(R7, F3);                         // code[5] -> mftgpr
+  a->popcntb(R7, R5);                          // code[6] -> popcntb
+  a->popcntw(R7, R5);                          // code[7] -> popcntw
+  a->fcfids(F3, F4);                           // code[8] -> fcfids
+  a->vand(VR0, VR0, VR0);                      // code[9] -> vand
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.

@@ -426,6 +430,7 @@ void VM_Version::determine_features() {
 
   uint32_t *code_end = (uint32_t *)a->pc();
   a->flush();
+  _features = VM_Version::unknown_m;
 
   // Print the detection code.
   if (PrintAssembly) {

@@ -450,6 +455,7 @@ void VM_Version::determine_features() {
   // determine which instructions are legal.
   int feature_cntr = 0;
   if (code[feature_cntr++]) features |= fsqrt_m;
+  if (code[feature_cntr++]) features |= fsqrts_m;
   if (code[feature_cntr++]) features |= isel_m;
   if (code[feature_cntr++]) features |= lxarxeh_m;
   if (code[feature_cntr++]) features |= cmpb_m;
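The renumbered code[n] comments are the point of this hunk: probe instructions are emitted in the same order in which the feature bits are read back, so inserting fsqrts at slot 1 shifts every later slot by one, and both sides must be edited together. A sketch of that index coupling (illustrative only; in the real code the SIGILL handler clobbers unsupported instructions in the buffer before this read-back happens):

    #include <cstdint>

    enum : uint64_t { fsqrt_m = 1u << 0, fsqrts_m = 1u << 1, isel_m = 1u << 2 };

    // code[i] is nonzero iff probe instruction i survived execution;
    // the read-back order below must mirror the emit order above.
    static uint64_t read_features(const uint32_t* code) {
      uint64_t features = 0;
      int feature_cntr = 0;
      if (code[feature_cntr++]) features |= fsqrt_m;   // code[0]
      if (code[feature_cntr++]) features |= fsqrts_m;  // code[1]
      if (code[feature_cntr++]) features |= isel_m;    // code[2]
      return features;
    }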
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -33,6 +33,7 @@ class VM_Version: public Abstract_VM_Version {
 protected:
   enum Feature_Flag {
     fsqrt,
+    fsqrts,
     isel,
     lxarxeh,
     cmpb,

@@ -46,6 +47,7 @@ protected:
   enum Feature_Flag_Set {
     unknown_m             = 0,
     fsqrt_m               = (1 << fsqrt  ),
+    fsqrts_m              = (1 << fsqrts ),
     isel_m                = (1 << isel   ),
     lxarxeh_m             = (1 << lxarxeh),
     cmpb_m                = (1 << cmpb   ),

@@ -72,6 +74,7 @@ public:
   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
   // CPU instruction support
   static bool has_fsqrt()   { return (_features & fsqrt_m) != 0; }
+  static bool has_fsqrts()  { return (_features & fsqrts_m) != 0; }
   static bool has_isel()    { return (_features & isel_m) != 0; }
   static bool has_lxarxeh() { return (_features & lxarxeh_m) !=0; }
   static bool has_cmpb()    { return (_features & cmpb_m) != 0; }
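Adding one instruction to the detection scheme touches three coordinated places in this header — the enum entry, its bit mask, and the has_*() accessor — plus the probe and the read-back in determine_features() above. A standalone sketch of the bookkeeping, with illustrative names and layout:

    #include <cstdint>

    // Illustrative re-creation of the Feature_Flag / Feature_Flag_Set pattern.
    enum Feature_Flag { fsqrt, fsqrts, isel, num_features };

    enum Feature_Flag_Set : uint64_t {
      unknown_m = 0,
      fsqrt_m   = 1ull << fsqrt,
      fsqrts_m  = 1ull << fsqrts,
      isel_m    = 1ull << isel,
    };

    static uint64_t g_features = unknown_m;  // filled in by the detection stub

    static bool has_fsqrts() { return (g_features & fsqrts_m) != 0; }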
@ -1,6 +1,6 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
* Copyright 2012, 2014 SAP AG. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -79,7 +79,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
|
|||||||
address npe_addr = __ pc(); // npe = null pointer exception
|
address npe_addr = __ pc(); // npe = null pointer exception
|
||||||
__ load_klass_with_trap_null_check(rcvr_klass, R3);
|
__ load_klass_with_trap_null_check(rcvr_klass, R3);
|
||||||
|
|
||||||
// Set methodOop (in case of interpreted method), and destination address.
|
// Set method (in case of interpreted method), and destination address.
|
||||||
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
|
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
@ -161,8 +161,6 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
|
|||||||
address npe_addr = __ pc(); // npe = null pointer exception
|
address npe_addr = __ pc(); // npe = null pointer exception
|
||||||
__ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);
|
__ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);
|
||||||
|
|
||||||
//__ ld(rcvr_klass, oopDesc::klass_offset_in_bytes(), R3_ARG1);
|
|
||||||
|
|
||||||
BLOCK_COMMENT("Load start of itable entries into itable_entry.");
|
BLOCK_COMMENT("Load start of itable entries into itable_entry.");
|
||||||
__ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
|
__ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
|
||||||
__ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
|
__ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
|
||||||
@@ -199,7 +197,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
                    itable_offset_search_inc;
   __ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);
 
-  // Compute itableMethodEntry and get methodOop and entry point for compiler.
+  // Compute itableMethodEntry and get method and entry point for compiler.
   const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
                             itableMethodEntry::method_offset_in_bytes();
 
@@ -211,7 +209,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
     Label ok;
     __ cmpd(CCR0, R19_method, 0);
     __ bne(CCR0, ok);
-    __ stop("methodOop is null", 103);
+    __ stop("method is null", 103);
     __ bind(ok);
   }
 #endif
@@ -3320,7 +3320,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
 
   // if tmp is invalid, then the function being called doesn't destroy the thread
   if (tmp->is_valid()) {
-    __ save_thread(tmp->as_register());
+    __ save_thread(tmp->as_pointer_register());
   }
   __ call(dest, relocInfo::runtime_call_type);
   __ delayed()->nop();
@@ -3328,7 +3328,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
     add_call_info_here(info);
   }
   if (tmp->is_valid()) {
-    __ restore_thread(tmp->as_register());
+    __ restore_thread(tmp->as_pointer_register());
   }
 
 #ifdef ASSERT
@@ -69,7 +69,7 @@ void LIRItem::load_nonconstant() {
 LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexception_opr;  }
 LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::Oissuing_pc_opr; }
 LIR_Opr LIRGenerator::syncTempOpr()     { return new_register(T_OBJECT); }
-LIR_Opr LIRGenerator::getThreadTemp()   { return rlock_callee_saved(T_INT); }
+LIR_Opr LIRGenerator::getThreadTemp()   { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
 
 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
   LIR_Opr opr;
@@ -66,6 +66,4 @@ define_pd_global(bool, OptimizeSinglePrecision, false);
 define_pd_global(bool, CSEArrayLength,       true );
 define_pd_global(bool, TwoOperandLIRForm,    false);
 
-define_pd_global(intx, SafepointPollOffset,  0    );
-
 #endif // CPU_SPARC_VM_C1_GLOBALS_SPARC_HPP
@@ -457,6 +457,13 @@ definitions %{
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
 source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
 // Must be visible to the DFA in dfa_sparc.cpp
 extern bool can_branch_register( Node *bol, Node *cmp );
 
@@ -468,6 +475,46 @@ extern bool use_block_zeroing(Node* count);
 #define LONG_HI_REG(x) (x)
 #define LONG_LO_REG(x) (x)
 
+class CallStubImpl {
+
+ //--------------------------------------------------------------
+ //---<  Used for optimization in Compile::Shorten_branches  >---
+ //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    if (TraceJumps) {
+      return (400); // just a guess
+    }
+    return ( NativeJump::instruction_size ); // sethi;jmp;nop
+  }
+
+  static uint size_deopt_handler() {
+    if (TraceJumps) {
+      return (400); // just a guess
+    }
+    return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
+  }
+};
+
 %}
 
 source %{
@@ -1710,22 +1757,9 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
 
 //=============================================================================
 
-uint size_exception_handler() {
-  if (TraceJumps) {
-    return (400); // just a guess
-  }
-  return ( NativeJump::instruction_size ); // sethi;jmp;nop
-}
-
-uint size_deopt_handler() {
-  if (TraceJumps) {
-    return (400); // just a guess
-  }
-  return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
-}
-
 // Emit exception handler code.
-int emit_exception_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
   Register temp_reg = G3;
   AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
   MacroAssembler _masm(&cbuf);
@@ -1746,7 +1780,7 @@ int emit_exception_handler(CodeBuffer& cbuf) {
   return offset;
 }
 
-int emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
   // Can't use any of the current frame's registers as we may have deopted
   // at a poll and everything (including G3) can be live.
   Register temp_reg = L0;
@@ -1112,7 +1112,6 @@ void Assembler::bsfl(Register dst, Register src) {
 }
 
 void Assembler::bsrl(Register dst, Register src) {
-  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
   int encode = prefix_and_encode(dst->encoding(), src->encoding());
   emit_int8(0x0F);
   emit_int8((unsigned char)0xBD);
@@ -2343,6 +2342,11 @@ void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector25
   emit_int8(imm8);
 }
 
+void Assembler::pause() {
+  emit_int8((unsigned char)0xF3);
+  emit_int8((unsigned char)0x90);
+}
+
 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
   assert(VM_Version::supports_sse4_2(), "");
   InstructionMark im(this);
@@ -2667,6 +2671,11 @@ void Assembler::rcll(Register dst, int imm8) {
   }
 }
 
+void Assembler::rdtsc() {
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0x31);
+}
+
 // copies data from [esi] to [edi] using rcx pointer sized words
 // generic
 void Assembler::rep_mov() {
@@ -2976,6 +2985,11 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
   emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
 }
 
+void Assembler::xabort(int8_t imm8) {
+  emit_int8((unsigned char)0xC6);
+  emit_int8((unsigned char)0xF8);
+  emit_int8((unsigned char)(imm8 & 0xFF));
+}
+
 void Assembler::xaddl(Address dst, Register src) {
   InstructionMark im(this);
@@ -2985,6 +2999,24 @@ void Assembler::xaddl(Address dst, Register src) {
   emit_operand(src, dst);
 }
 
+void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
+  InstructionMark im(this);
+  relocate(rtype);
+  if (abort.is_bound()) {
+    address entry = target(abort);
+    assert(entry != NULL, "abort entry NULL");
+    intptr_t offset = entry - pc();
+    emit_int8((unsigned char)0xC7);
+    emit_int8((unsigned char)0xF8);
+    emit_int32(offset - 6); // 2 opcode + 4 address
+  } else {
+    abort.add_patch_at(code(), locator());
+    emit_int8((unsigned char)0xC7);
+    emit_int8((unsigned char)0xF8);
+    emit_int32(0);
+  }
+}
+
 void Assembler::xchgl(Register dst, Address src) { // xchg
   InstructionMark im(this);
   prefix(src, dst);
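A note on the `emit_int32(offset - 6)` line above: XBEGIN's rel32 field is measured from the end of the 6-byte instruction (2 opcode bytes plus the 4-byte displacement), while `offset` is computed against the instruction's start (`pc()`). A minimal sketch of that arithmetic, derived from the code above rather than taken from the patch:

#include <cstdint>

// Hypothetical helper: derive the rel32 field for XBEGIN (0xC7 0xF8 <rel32>).
// rel32 is relative to the next instruction, i.e. insn_start + 6.
int32_t xbegin_rel32(uintptr_t insn_start, uintptr_t abort_handler) {
  intptr_t offset = (intptr_t)(abort_handler - insn_start); // == entry - pc() above
  return (int32_t)(offset - 6);                             // 2 opcode + 4 address
}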
@@ -2998,6 +3030,12 @@ void Assembler::xchgl(Register dst, Register src) {
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::xend() {
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0x01);
+  emit_int8((unsigned char)0xD5);
+}
+
 void Assembler::xgetbv() {
   emit_int8(0x0F);
   emit_int8(0x01);
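The instructions added above (xbegin, xend, xabort, plus pause and rdtsc) are the Intel TSX/RTM primitives this changeset builds on. For orientation only, not code from the patch, the same transaction shape written with the standard compiler intrinsics from immintrin.h (GCC/Clang, compiled with -mrtm) looks like:

#include <immintrin.h>

// Hedged sketch: add delta to *p inside an RTM transaction, reporting aborts.
bool add_in_transaction(int* p, int delta) {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    *p += delta;   // speculative until commit
    _xend();       // XEND commits the transaction
    return true;
  }
  // Aborted: status carries the _XABORT_* reason bits, as RAX does in the VM code.
  return false;
}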
@@ -4938,7 +4976,6 @@ void Assembler::bsfq(Register dst, Register src) {
 }
 
 void Assembler::bsrq(Register dst, Register src) {
-  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
   emit_int8(0x0F);
   emit_int8((unsigned char)0xBD);
@@ -1451,6 +1451,8 @@ private:
   // Pemutation of 64bit words
   void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
 
+  void pause();
+
   // SSE4.2 string instructions
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
@@ -1535,6 +1537,8 @@ private:
 
   void rclq(Register dst, int imm8);
 
+  void rdtsc();
+
   void ret(int imm16);
 
   void sahf();
@@ -1632,16 +1636,22 @@ private:
   void ucomiss(XMMRegister dst, Address src);
   void ucomiss(XMMRegister dst, XMMRegister src);
 
+  void xabort(int8_t imm8);
+
   void xaddl(Address dst, Register src);
 
   void xaddq(Address dst, Register src);
 
+  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
+
   void xchgl(Register reg, Address adr);
   void xchgl(Register dst, Register src);
 
   void xchgq(Register reg, Address adr);
   void xchgq(Register dst, Register src);
 
+  void xend();
+
   // Get Value of Extended Control Register
   void xgetbv();
 
@@ -604,8 +604,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
 
   // Note: we do not need to round double result; float result has the right precision
   // the poll sets the condition code, but no data registers
-  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
-                              relocInfo::poll_return_type);
+  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
 
   if (Assembler::is_polling_page_far()) {
     __ lea(rscratch1, polling_page);
@@ -619,8 +618,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
 
 
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
-                              relocInfo::poll_type);
+  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
   guarantee(info != NULL, "Shouldn't be NULL");
   int offset = __ offset();
   if (Assembler::is_polling_page_far()) {
@@ -65,6 +65,4 @@ define_pd_global(bool, OptimizeSinglePrecision, true );
 define_pd_global(bool, CSEArrayLength,       false);
 define_pd_global(bool, TwoOperandLIRForm,    true );
 
-define_pd_global(intx, SafepointPollOffset,  256  );
-
 #endif // CPU_X86_VM_C1_GLOBALS_X86_HPP
@@ -129,6 +129,42 @@ define_pd_global(uintx, TypeProfileLevel, 111);
   product(bool, UseFastStosb, false,                                        \
           "Use fast-string operation for zeroing: rep stosb")               \
                                                                             \
+  /* Use Restricted Transactional Memory for lock eliding */                \
+  product(bool, UseRTMLocking, false,                                       \
+          "Enable RTM lock eliding for inflated locks in compiled code")    \
+                                                                            \
+  experimental(bool, UseRTMForStackLocks, false,                            \
+          "Enable RTM lock eliding for stack locks in compiled code")       \
+                                                                            \
+  product(bool, UseRTMDeopt, false,                                         \
+          "Perform deopt and recompilation based on RTM abort ratio")       \
+                                                                            \
+  product(uintx, RTMRetryCount, 5,                                          \
+          "Number of RTM retries on lock abort or busy")                    \
+                                                                            \
+  experimental(intx, RTMSpinLoopCount, 100,                                 \
+          "Spin count for lock to become free before RTM retry")            \
+                                                                            \
+  experimental(intx, RTMAbortThreshold, 1000,                               \
+          "Calculate abort ratio after this number of aborts")              \
+                                                                            \
+  experimental(intx, RTMLockingThreshold, 10000,                            \
+          "Lock count at which to do RTM lock eliding without "             \
+          "abort ratio calculation")                                        \
+                                                                            \
+  experimental(intx, RTMAbortRatio, 50,                                     \
+          "Lock abort ratio at which to stop use RTM lock eliding")         \
+                                                                            \
+  experimental(intx, RTMTotalCountIncrRate, 64,                             \
+          "Increment total RTM attempted lock count once every n times")    \
+                                                                            \
+  experimental(intx, RTMLockingCalculationDelay, 0,                         \
+          "Number of milliseconds to wait before start calculating aborts " \
+          "for RTM locking")                                                \
+                                                                            \
+  experimental(bool, UseRTMXendForLockBusy, false,                          \
+          "Use RTM Xend instead of Xabort when lock busy")                  \
+                                                                            \
   /* assembler */                                                           \
   product(bool, Use486InstrsOnly, false,                                    \
           "Use 80486 Compliant instruction subset")                        \
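The interplay of RTMAbortThreshold, RTMAbortRatio, and RTMTotalCountIncrRate is easiest to see in plain C++. The sketch below restates, with the default values above, the decision that rtm_abort_ratio_calculation (later in this diff) encodes in assembly; it is an illustration, not code from the patch:

#include <cstdint>

bool should_disable_rtm(uint64_t abort_count, uint64_t sampled_total_count) {
  const uint64_t kAbortThreshold     = 1000; // RTMAbortThreshold
  const uint64_t kTotalCountIncrRate = 64;   // RTMTotalCountIncrRate: total is sampled 1/64
  const uint64_t kAbortRatio         = 50;   // RTMAbortRatio, in percent

  if (abort_count < kAbortThreshold) return false; // too few aborts to judge
  uint64_t aborted = abort_count * 100;
  uint64_t all     = sampled_total_count * kTotalCountIncrRate;
  // e.g. sampled_total_count == 1000 estimates 64000 real lock attempts;
  // RTM is abandoned once abort_count reaches 32000, i.e. a 50% abort ratio.
  return aborted >= all * kAbortRatio;
}

Because the total count is only incremented once every RTMTotalCountIncrRate (64) attempts, the comparison effectively tests whether at least RTMAbortRatio percent of all estimated lock attempts aborted.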
@@ -301,7 +301,9 @@ void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
   mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
 }
 
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
+  // scratch register is not used,
+  // it is defined to match parameters of 64-bit version of this method.
   if (src.is_lval()) {
     mov_literal32(dst, (intptr_t)src.target(), src.rspec());
   } else {
@@ -613,6 +615,15 @@ void MacroAssembler::decrementq(Address dst, int value) {
   /* else */      { subq(dst, value)       ; return; }
 }
 
+void MacroAssembler::incrementq(AddressLiteral dst) {
+  if (reachable(dst)) {
+    incrementq(as_Address(dst));
+  } else {
+    lea(rscratch1, dst);
+    incrementq(Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::incrementq(Register reg, int value) {
   if (value == min_jint) { addq(reg, value); return; }
   if (value < 0) { decrementq(reg, -value); return; }
@@ -681,15 +692,15 @@ void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
   movq(dst, rscratch1);
 }
 
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
   if (src.is_lval()) {
     mov_literal64(dst, (intptr_t)src.target(), src.rspec());
   } else {
     if (reachable(src)) {
       movq(dst, as_Address(src));
     } else {
-      lea(rscratch1, src);
-      movq(dst, Address(rscratch1,0));
+      lea(scratch, src);
+      movq(dst, Address(scratch, 0));
     }
   }
 }
@@ -988,21 +999,38 @@ void MacroAssembler::andptr(Register dst, int32_t imm32) {
   LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
 }
 
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
-  pushf();
-  if (reachable(counter_addr)) {
-    if (os::is_MP())
-      lock();
-    incrementl(as_Address(counter_addr));
-  } else {
-    lea(rscratch1, counter_addr);
-    if (os::is_MP())
-      lock();
-    incrementl(Address(rscratch1, 0));
-  }
-  popf();
-}
+void MacroAssembler::atomic_incl(Address counter_addr) {
+  if (os::is_MP())
+    lock();
+  incrementl(counter_addr);
+}
+
+void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
+  if (reachable(counter_addr)) {
+    atomic_incl(as_Address(counter_addr));
+  } else {
+    lea(scr, counter_addr);
+    atomic_incl(Address(scr, 0));
+  }
+}
+
+#ifdef _LP64
+void MacroAssembler::atomic_incq(Address counter_addr) {
+  if (os::is_MP())
+    lock();
+  incrementq(counter_addr);
+}
+
+void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
+  if (reachable(counter_addr)) {
+    atomic_incq(as_Address(counter_addr));
+  } else {
+    lea(scr, counter_addr);
+    atomic_incq(Address(scr, 0));
+  }
+}
+#endif
 
 // Writes to stack successive pages until offset reached to check for
 // stack overflow + shadow pages. This clobbers tmp.
 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef COMPILER2
|
#ifdef COMPILER2
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
|
||||||
|
// Update rtm_counters based on abort status
|
||||||
|
// input: abort_status
|
||||||
|
// rtm_counters (RTMLockingCounters*)
|
||||||
|
// flags are killed
|
||||||
|
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
|
||||||
|
|
||||||
|
atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
|
||||||
|
if (PrintPreciseRTMLockingStatistics) {
|
||||||
|
for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
|
||||||
|
Label check_abort;
|
||||||
|
testl(abort_status, (1<<i));
|
||||||
|
jccb(Assembler::equal, check_abort);
|
||||||
|
atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
|
||||||
|
bind(check_abort);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Branch if (random & (count-1) != 0), count is 2^n
|
||||||
|
// tmp, scr and flags are killed
|
||||||
|
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
|
||||||
|
assert(tmp == rax, "");
|
||||||
|
assert(scr == rdx, "");
|
||||||
|
rdtsc(); // modifies EDX:EAX
|
||||||
|
andptr(tmp, count-1);
|
||||||
|
jccb(Assembler::notZero, brLabel);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform abort ratio calculation, set no_rtm bit if high ratio
|
||||||
|
// input: rtm_counters_Reg (RTMLockingCounters* address)
|
||||||
|
// tmpReg, rtm_counters_Reg and flags are killed
|
||||||
|
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
|
||||||
|
Register rtm_counters_Reg,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
Metadata* method_data) {
|
||||||
|
Label L_done, L_check_always_rtm1, L_check_always_rtm2;
|
||||||
|
|
||||||
|
if (RTMLockingCalculationDelay > 0) {
|
||||||
|
// Delay calculation
|
||||||
|
movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
|
||||||
|
testptr(tmpReg, tmpReg);
|
||||||
|
jccb(Assembler::equal, L_done);
|
||||||
|
}
|
||||||
|
// Abort ratio calculation only if abort_count > RTMAbortThreshold
|
||||||
|
// Aborted transactions = abort_count * 100
|
||||||
|
// All transactions = total_count * RTMTotalCountIncrRate
|
||||||
|
// Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
|
||||||
|
|
||||||
|
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
|
||||||
|
cmpptr(tmpReg, RTMAbortThreshold);
|
||||||
|
jccb(Assembler::below, L_check_always_rtm2);
|
||||||
|
imulptr(tmpReg, tmpReg, 100);
|
||||||
|
|
||||||
|
Register scrReg = rtm_counters_Reg;
|
||||||
|
movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
|
||||||
|
imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
|
||||||
|
imulptr(scrReg, scrReg, RTMAbortRatio);
|
||||||
|
cmpptr(tmpReg, scrReg);
|
||||||
|
jccb(Assembler::below, L_check_always_rtm1);
|
||||||
|
if (method_data != NULL) {
|
||||||
|
// set rtm_state to "no rtm" in MDO
|
||||||
|
mov_metadata(tmpReg, method_data);
|
||||||
|
if (os::is_MP()) {
|
||||||
|
lock();
|
||||||
|
}
|
||||||
|
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
|
||||||
|
}
|
||||||
|
jmpb(L_done);
|
||||||
|
bind(L_check_always_rtm1);
|
||||||
|
// Reload RTMLockingCounters* address
|
||||||
|
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
|
||||||
|
bind(L_check_always_rtm2);
|
||||||
|
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
|
||||||
|
cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
|
||||||
|
jccb(Assembler::below, L_done);
|
||||||
|
if (method_data != NULL) {
|
||||||
|
// set rtm_state to "always rtm" in MDO
|
||||||
|
mov_metadata(tmpReg, method_data);
|
||||||
|
if (os::is_MP()) {
|
||||||
|
lock();
|
||||||
|
}
|
||||||
|
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
|
||||||
|
}
|
||||||
|
bind(L_done);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update counters and perform abort ratio calculation
|
||||||
|
// input: abort_status_Reg
|
||||||
|
// rtm_counters_Reg, flags are killed
|
||||||
|
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
|
||||||
|
Register rtm_counters_Reg,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
Metadata* method_data,
|
||||||
|
bool profile_rtm) {
|
||||||
|
|
||||||
|
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
|
||||||
|
// update rtm counters based on rax value at abort
|
||||||
|
// reads abort_status_Reg, updates flags
|
||||||
|
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
|
||||||
|
rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
|
||||||
|
if (profile_rtm) {
|
||||||
|
// Save abort status because abort_status_Reg is used by following code.
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
push(abort_status_Reg);
|
||||||
|
}
|
||||||
|
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
|
||||||
|
rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
|
||||||
|
// restore abort status
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
pop(abort_status_Reg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
|
||||||
|
// inputs: retry_count_Reg
|
||||||
|
// : abort_status_Reg
|
||||||
|
// output: retry_count_Reg decremented by 1
|
||||||
|
// flags are killed
|
||||||
|
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
|
||||||
|
Label doneRetry;
|
||||||
|
assert(abort_status_Reg == rax, "");
|
||||||
|
// The abort reason bits are in eax (see all states in rtmLocking.hpp)
|
||||||
|
// 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
|
||||||
|
// if reason is in 0x6 and retry count != 0 then retry
|
||||||
|
andptr(abort_status_Reg, 0x6);
|
||||||
|
jccb(Assembler::zero, doneRetry);
|
||||||
|
testl(retry_count_Reg, retry_count_Reg);
|
||||||
|
jccb(Assembler::zero, doneRetry);
|
||||||
|
pause();
|
||||||
|
decrementl(retry_count_Reg);
|
||||||
|
jmp(retryLabel);
|
||||||
|
bind(doneRetry);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spin and retry if lock is busy,
|
||||||
|
// inputs: box_Reg (monitor address)
|
||||||
|
// : retry_count_Reg
|
||||||
|
// output: retry_count_Reg decremented by 1
|
||||||
|
// : clear z flag if retry count exceeded
|
||||||
|
// tmp_Reg, scr_Reg, flags are killed
|
||||||
|
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
|
||||||
|
Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
|
||||||
|
Label SpinLoop, SpinExit, doneRetry;
|
||||||
|
// Clean monitor_value bit to get valid pointer
|
||||||
|
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
|
||||||
|
|
||||||
|
testl(retry_count_Reg, retry_count_Reg);
|
||||||
|
jccb(Assembler::zero, doneRetry);
|
||||||
|
decrementl(retry_count_Reg);
|
||||||
|
movptr(scr_Reg, RTMSpinLoopCount);
|
||||||
|
|
||||||
|
bind(SpinLoop);
|
||||||
|
pause();
|
||||||
|
decrementl(scr_Reg);
|
||||||
|
jccb(Assembler::lessEqual, SpinExit);
|
||||||
|
movptr(tmp_Reg, Address(box_Reg, owner_offset));
|
||||||
|
testptr(tmp_Reg, tmp_Reg);
|
||||||
|
jccb(Assembler::notZero, SpinLoop);
|
||||||
|
|
||||||
|
bind(SpinExit);
|
||||||
|
jmp(retryLabel);
|
||||||
|
bind(doneRetry);
|
||||||
|
incrementl(retry_count_Reg); // clear z flag
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use RTM for normal stack locks
|
||||||
|
// Input: objReg (object to lock)
|
||||||
|
void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
|
||||||
|
Register retry_on_abort_count_Reg,
|
||||||
|
RTMLockingCounters* stack_rtm_counters,
|
||||||
|
Metadata* method_data, bool profile_rtm,
|
||||||
|
Label& DONE_LABEL, Label& IsInflated) {
|
||||||
|
assert(UseRTMForStackLocks, "why call this otherwise?");
|
||||||
|
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
|
||||||
|
assert(tmpReg == rax, "");
|
||||||
|
assert(scrReg == rdx, "");
|
||||||
|
Label L_rtm_retry, L_decrement_retry, L_on_abort;
|
||||||
|
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
|
||||||
|
bind(L_rtm_retry);
|
||||||
|
}
|
||||||
|
if (!UseRTMXendForLockBusy) {
|
||||||
|
movptr(tmpReg, Address(objReg, 0));
|
||||||
|
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
|
||||||
|
jcc(Assembler::notZero, IsInflated);
|
||||||
|
}
|
||||||
|
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
|
||||||
|
Label L_noincrement;
|
||||||
|
if (RTMTotalCountIncrRate > 1) {
|
||||||
|
// tmpReg, scrReg and flags are killed
|
||||||
|
branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
|
||||||
|
}
|
||||||
|
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
|
||||||
|
atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
|
||||||
|
bind(L_noincrement);
|
||||||
|
}
|
||||||
|
xbegin(L_on_abort);
|
||||||
|
movptr(tmpReg, Address(objReg, 0)); // fetch markword
|
||||||
|
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
|
||||||
|
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
|
||||||
|
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
|
||||||
|
|
||||||
|
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
|
||||||
|
if (UseRTMXendForLockBusy) {
|
||||||
|
xend();
|
||||||
|
movptr(tmpReg, Address(objReg, 0));
|
||||||
|
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
|
||||||
|
jcc(Assembler::notZero, IsInflated);
|
||||||
|
movptr(abort_status_Reg, 0x1); // Set the abort status to 1 (as xabort does)
|
||||||
|
jmp(L_decrement_retry);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
xabort(0);
|
||||||
|
}
|
||||||
|
bind(L_on_abort);
|
||||||
|
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
|
||||||
|
rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
|
||||||
|
}
|
||||||
|
bind(L_decrement_retry);
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
// retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
|
||||||
|
rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use RTM for inflating locks
|
||||||
|
// inputs: objReg (object to lock)
|
||||||
|
// boxReg (on-stack box address (displaced header location) - KILLED)
|
||||||
|
// tmpReg (ObjectMonitor address + 2(monitor_value))
|
||||||
|
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
|
||||||
|
Register scrReg, Register retry_on_busy_count_Reg,
|
||||||
|
Register retry_on_abort_count_Reg,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
Metadata* method_data, bool profile_rtm,
|
||||||
|
Label& DONE_LABEL) {
|
||||||
|
assert(UseRTMLocking, "why call this otherwise?");
|
||||||
|
assert(tmpReg == rax, "");
|
||||||
|
assert(scrReg == rdx, "");
|
||||||
|
Label L_rtm_retry, L_decrement_retry, L_on_abort;
|
||||||
|
// Clean monitor_value bit to get valid pointer
|
||||||
|
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
|
||||||
|
|
||||||
|
// Without cast to int32_t a movptr will destroy r10 which is typically obj
|
||||||
|
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
|
||||||
|
movptr(boxReg, tmpReg); // Save ObjectMonitor address
|
||||||
|
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
movl(retry_on_busy_count_Reg, RTMRetryCount); // Retry on lock busy
|
||||||
|
movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
|
||||||
|
bind(L_rtm_retry);
|
||||||
|
}
|
||||||
|
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
|
||||||
|
Label L_noincrement;
|
||||||
|
if (RTMTotalCountIncrRate > 1) {
|
||||||
|
// tmpReg, scrReg and flags are killed
|
||||||
|
branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
|
||||||
|
}
|
||||||
|
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
|
||||||
|
atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
|
||||||
|
bind(L_noincrement);
|
||||||
|
}
|
||||||
|
xbegin(L_on_abort);
|
||||||
|
movptr(tmpReg, Address(objReg, 0));
|
||||||
|
movptr(tmpReg, Address(tmpReg, owner_offset));
|
||||||
|
testptr(tmpReg, tmpReg);
|
||||||
|
jcc(Assembler::zero, DONE_LABEL);
|
||||||
|
if (UseRTMXendForLockBusy) {
|
||||||
|
xend();
|
||||||
|
jmp(L_decrement_retry);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
xabort(0);
|
||||||
|
}
|
||||||
|
bind(L_on_abort);
|
||||||
|
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
|
||||||
|
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
|
||||||
|
rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
|
||||||
|
}
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
// retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
|
||||||
|
rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
|
||||||
|
}
|
||||||
|
|
||||||
|
movptr(tmpReg, Address(boxReg, owner_offset)) ;
|
||||||
|
testptr(tmpReg, tmpReg) ;
|
||||||
|
jccb(Assembler::notZero, L_decrement_retry) ;
|
||||||
|
|
||||||
|
// Appears unlocked - try to swing _owner from null to non-null.
|
||||||
|
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
|
||||||
|
#ifdef _LP64
|
||||||
|
Register threadReg = r15_thread;
|
||||||
|
#else
|
||||||
|
get_thread(scrReg);
|
||||||
|
Register threadReg = scrReg;
|
||||||
|
#endif
|
||||||
|
if (os::is_MP()) {
|
||||||
|
lock();
|
||||||
|
}
|
||||||
|
cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
|
||||||
|
|
||||||
|
if (RTMRetryCount > 0) {
|
||||||
|
// success done else retry
|
||||||
|
jccb(Assembler::equal, DONE_LABEL) ;
|
||||||
|
bind(L_decrement_retry);
|
||||||
|
// Spin and retry if lock is busy.
|
||||||
|
rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
bind(L_decrement_retry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // INCLUDE_RTM_OPT
|
||||||
|
|
||||||
// Fast_Lock and Fast_Unlock used by C2
|
// Fast_Lock and Fast_Unlock used by C2
|
||||||
|
|
||||||
// Because the transitions from emitted code to the runtime
|
// Because the transitions from emitted code to the runtime
|
||||||
@ -1350,17 +1697,26 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
|
|||||||
// box: on-stack box address (displaced header location) - KILLED
|
// box: on-stack box address (displaced header location) - KILLED
|
||||||
// rax,: tmp -- KILLED
|
// rax,: tmp -- KILLED
|
||||||
// scr: tmp -- KILLED
|
// scr: tmp -- KILLED
|
||||||
void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, BiasedLockingCounters* counters) {
|
void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
|
||||||
|
Register scrReg, Register cx1Reg, Register cx2Reg,
|
||||||
|
BiasedLockingCounters* counters,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
RTMLockingCounters* stack_rtm_counters,
|
||||||
|
Metadata* method_data,
|
||||||
|
bool use_rtm, bool profile_rtm) {
|
||||||
// Ensure the register assignents are disjoint
|
// Ensure the register assignents are disjoint
|
||||||
guarantee (objReg != boxReg, "");
|
assert(tmpReg == rax, "");
|
||||||
guarantee (objReg != tmpReg, "");
|
|
||||||
guarantee (objReg != scrReg, "");
|
if (use_rtm) {
|
||||||
guarantee (boxReg != tmpReg, "");
|
assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
|
||||||
guarantee (boxReg != scrReg, "");
|
} else {
|
||||||
guarantee (tmpReg == rax, "");
|
assert(cx1Reg == noreg, "");
|
||||||
|
assert(cx2Reg == noreg, "");
|
||||||
|
assert_different_registers(objReg, boxReg, tmpReg, scrReg);
|
||||||
|
}
|
||||||
|
|
||||||
if (counters != NULL) {
|
if (counters != NULL) {
|
||||||
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()));
|
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
|
||||||
}
|
}
|
||||||
if (EmitSync & 1) {
|
if (EmitSync & 1) {
|
||||||
// set box->dhw = unused_mark (3)
|
// set box->dhw = unused_mark (3)
|
||||||
@ -1419,12 +1775,20 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
|
|||||||
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
|
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
if (UseRTMForStackLocks && use_rtm) {
|
||||||
|
rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
|
||||||
|
stack_rtm_counters, method_data, profile_rtm,
|
||||||
|
DONE_LABEL, IsInflated);
|
||||||
|
}
|
||||||
|
#endif // INCLUDE_RTM_OPT
|
||||||
|
|
||||||
movptr(tmpReg, Address(objReg, 0)); // [FETCH]
|
movptr(tmpReg, Address(objReg, 0)); // [FETCH]
|
||||||
testl (tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
|
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
|
||||||
jccb (Assembler::notZero, IsInflated);
|
jccb(Assembler::notZero, IsInflated);
|
||||||
|
|
||||||
// Attempt stack-locking ...
|
// Attempt stack-locking ...
|
||||||
orptr (tmpReg, 0x1);
|
orptr (tmpReg, markOopDesc::unlocked_value);
|
||||||
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
|
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
|
||||||
if (os::is_MP()) {
|
if (os::is_MP()) {
|
||||||
lock();
|
lock();
|
||||||
@ -1434,19 +1798,32 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
|
|||||||
cond_inc32(Assembler::equal,
|
cond_inc32(Assembler::equal,
|
||||||
ExternalAddress((address)counters->fast_path_entry_count_addr()));
|
ExternalAddress((address)counters->fast_path_entry_count_addr()));
|
||||||
}
|
}
|
||||||
jccb(Assembler::equal, DONE_LABEL);
|
jcc(Assembler::equal, DONE_LABEL); // Success
|
||||||
|
|
||||||
// Recursive locking
|
// Recursive locking.
|
||||||
|
// The object is stack-locked: markword contains stack pointer to BasicLock.
|
||||||
|
// Locked by current thread if difference with current SP is less than one page.
|
||||||
subptr(tmpReg, rsp);
|
subptr(tmpReg, rsp);
|
||||||
|
// Next instruction set ZFlag == 1 (Success) if difference is less then one page.
|
||||||
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
|
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
|
||||||
movptr(Address(boxReg, 0), tmpReg);
|
movptr(Address(boxReg, 0), tmpReg);
|
||||||
if (counters != NULL) {
|
if (counters != NULL) {
|
||||||
cond_inc32(Assembler::equal,
|
cond_inc32(Assembler::equal,
|
||||||
ExternalAddress((address)counters->fast_path_entry_count_addr()));
|
ExternalAddress((address)counters->fast_path_entry_count_addr()));
|
||||||
}
|
}
|
||||||
jmpb(DONE_LABEL);
|
jmp(DONE_LABEL);
|
||||||
|
|
||||||
bind(IsInflated);
|
bind(IsInflated);
|
||||||
|
// The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
// Use the same RTM locking code in 32- and 64-bit VM.
|
||||||
|
if (use_rtm) {
|
||||||
|
rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
|
||||||
|
rtm_counters, method_data, profile_rtm, DONE_LABEL);
|
||||||
|
} else {
|
||||||
|
#endif // INCLUDE_RTM_OPT
|
||||||
|
|
||||||
#ifndef _LP64
|
#ifndef _LP64
|
||||||
// The object is inflated.
|
// The object is inflated.
|
||||||
//
|
//
|
||||||
@ -1576,7 +1953,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
|
|||||||
// Without cast to int32_t a movptr will destroy r10 which is typically obj
|
// Without cast to int32_t a movptr will destroy r10 which is typically obj
|
||||||
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
|
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
|
||||||
|
|
||||||
mov (boxReg, tmpReg);
|
movptr (boxReg, tmpReg);
|
||||||
movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
||||||
testptr(tmpReg, tmpReg);
|
testptr(tmpReg, tmpReg);
|
||||||
jccb (Assembler::notZero, DONE_LABEL);
|
jccb (Assembler::notZero, DONE_LABEL);
|
||||||
@ -1587,9 +1964,11 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
|
|||||||
}
|
}
|
||||||
cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
||||||
// Intentional fall-through into DONE_LABEL ...
|
// Intentional fall-through into DONE_LABEL ...
|
||||||
|
#endif // _LP64
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
} // use_rtm()
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// DONE_LABEL is a hot target - we'd really like to place it at the
|
// DONE_LABEL is a hot target - we'd really like to place it at the
|
||||||
// start of cache line by padding with NOPs.
|
// start of cache line by padding with NOPs.
|
||||||
// See the AMD and Intel software optimization manuals for the
|
// See the AMD and Intel software optimization manuals for the
|
||||||
@ -1631,11 +2010,9 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
|
|||||||
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
|
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
|
||||||
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
|
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
|
||||||
|
|
||||||
void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
|
void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
|
||||||
guarantee (objReg != boxReg, "");
|
assert(boxReg == rax, "");
|
||||||
guarantee (objReg != tmpReg, "");
|
assert_different_registers(objReg, boxReg, tmpReg);
|
||||||
guarantee (boxReg != tmpReg, "");
|
|
||||||
guarantee (boxReg == rax, "");
|
|
||||||
|
|
||||||
if (EmitSync & 4) {
|
if (EmitSync & 4) {
|
||||||
// Disable - inhibit all inlining. Force control through the slow-path
|
// Disable - inhibit all inlining. Force control through the slow-path
|
||||||
@ -1667,14 +2044,41 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
|||||||
biased_locking_exit(objReg, tmpReg, DONE_LABEL);
|
biased_locking_exit(objReg, tmpReg, DONE_LABEL);
|
||||||
}
|
}
|
||||||
|
|
||||||
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
|
#if INCLUDE_RTM_OPT
|
||||||
movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
|
if (UseRTMForStackLocks && use_rtm) {
|
||||||
jccb (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
|
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
|
||||||
|
Label L_regular_unlock;
|
||||||
|
movptr(tmpReg, Address(objReg, 0)); // fetch markword
|
||||||
|
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
|
||||||
|
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
|
||||||
|
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
|
||||||
|
xend(); // otherwise end...
|
||||||
|
jmp(DONE_LABEL); // ... and we're done
|
||||||
|
bind(L_regular_unlock);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
testptr(tmpReg, 0x02); // Inflated?
|
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
|
||||||
|
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
|
||||||
|
movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
|
||||||
|
testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
|
||||||
jccb (Assembler::zero, Stacked);
|
jccb (Assembler::zero, Stacked);
|
||||||
|
|
||||||
// It's inflated.
|
// It's inflated.
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
if (use_rtm) {
|
||||||
|
Label L_regular_inflated_unlock;
|
||||||
|
// Clean monitor_value bit to get valid pointer
|
||||||
|
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
|
||||||
|
movptr(boxReg, Address(tmpReg, owner_offset));
|
||||||
|
testptr(boxReg, boxReg);
|
||||||
|
jccb(Assembler::notZero, L_regular_inflated_unlock);
|
||||||
|
xend();
|
||||||
|
jmpb(DONE_LABEL);
|
||||||
|
bind(L_regular_inflated_unlock);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
// Despite our balanced locking property we still check that m->_owner == Self
|
// Despite our balanced locking property we still check that m->_owner == Self
|
||||||
// as java routines or native JNI code called by this thread might
|
// as java routines or native JNI code called by this thread might
|
||||||
// have released the lock.
|
// have released the lock.
|
||||||
@ -2448,7 +2852,9 @@ void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
|
|||||||
Condition negated_cond = negate_condition(cond);
|
Condition negated_cond = negate_condition(cond);
|
||||||
Label L;
|
Label L;
|
||||||
jcc(negated_cond, L);
|
jcc(negated_cond, L);
|
||||||
|
pushf(); // Preserve flags
|
||||||
atomic_incl(counter_addr);
|
atomic_incl(counter_addr);
|
||||||
|
popf();
|
||||||
bind(L);
|
bind(L);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -27,6 +27,7 @@
|
|||||||
|
|
||||||
#include "asm/assembler.hpp"
|
#include "asm/assembler.hpp"
|
||||||
#include "utilities/macros.hpp"
|
#include "utilities/macros.hpp"
|
||||||
|
#include "runtime/rtmLocking.hpp"
|
||||||
|
|
||||||
|
|
||||||
// MacroAssembler extends Assembler by frequently used macros.
|
// MacroAssembler extends Assembler by frequently used macros.
|
||||||
@ -111,7 +112,8 @@ class MacroAssembler: public Assembler {
|
|||||||
op == 0xE9 /* jmp */ ||
|
op == 0xE9 /* jmp */ ||
|
||||||
op == 0xEB /* short jmp */ ||
|
op == 0xEB /* short jmp */ ||
|
||||||
(op & 0xF0) == 0x70 /* short jcc */ ||
|
(op & 0xF0) == 0x70 /* short jcc */ ||
|
||||||
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
|
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
|
||||||
|
op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
|
||||||
"Invalid opcode at patch point");
|
"Invalid opcode at patch point");
|
||||||
|
|
||||||
if (op == 0xEB || (op & 0xF0) == 0x70) {
|
if (op == 0xEB || (op & 0xF0) == 0x70) {
|
||||||
@@ -121,7 +123,7 @@ class MacroAssembler: public Assembler {
       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
       *disp = imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
       int imm32 = target - (address) &disp[1];
       *disp = imm32;
     }
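Why xbegin joins the jcc case in these two hunks: both are two-byte opcodes whose rel32 field starts at branch[2], with the displacement measured from the end of that field. A compact sketch of the shared patching arithmetic (a hypothetical standalone helper, not code from the patch):

#include <cstdint>

void patch_rel32(uint8_t* branch, uint8_t* target) {
  uint8_t op = branch[0];
  int rel32_at = (op == 0x0F || op == 0xC7) ? 2 : 1;  // 0x0F jcc and 0xC7 xbegin are 2-byte opcodes
  int32_t* disp = (int32_t*)(branch + rel32_at);
  *disp = (int32_t)(target - (uint8_t*)(disp + 1));   // relative to the next instruction
}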
@@ -161,7 +163,6 @@ class MacroAssembler: public Assembler {
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
-
 
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
|
|||||||
void incrementl(AddressLiteral dst);
|
void incrementl(AddressLiteral dst);
|
||||||
void incrementl(ArrayAddress dst);
|
void incrementl(ArrayAddress dst);
|
||||||
|
|
||||||
|
void incrementq(AddressLiteral dst);
|
||||||
|
|
||||||
// Alignment
|
// Alignment
|
||||||
void align(int modulus);
|
void align(int modulus);
|
||||||
|
|
||||||
@ -654,8 +657,36 @@ class MacroAssembler: public Assembler {
|
|||||||
#ifdef COMPILER2
|
#ifdef COMPILER2
|
||||||
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
||||||
// See full desription in macroAssembler_x86.cpp.
|
// See full desription in macroAssembler_x86.cpp.
|
||||||
void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
|
void fast_lock(Register obj, Register box, Register tmp,
|
||||||
void fast_unlock(Register obj, Register box, Register tmp);
|
Register scr, Register cx1, Register cx2,
|
||||||
|
BiasedLockingCounters* counters,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
RTMLockingCounters* stack_rtm_counters,
|
||||||
|
Metadata* method_data,
|
||||||
|
bool use_rtm, bool profile_rtm);
|
||||||
|
void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
void rtm_counters_update(Register abort_status, Register rtm_counters);
|
||||||
|
void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
|
||||||
|
void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
Metadata* method_data);
|
||||||
|
void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
|
||||||
|
RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
|
||||||
|
void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
|
||||||
|
void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
|
||||||
|
void rtm_stack_locking(Register obj, Register tmp, Register scr,
|
||||||
|
Register retry_on_abort_count,
|
||||||
|
RTMLockingCounters* stack_rtm_counters,
|
||||||
|
Metadata* method_data, bool profile_rtm,
|
||||||
|
Label& DONE_LABEL, Label& IsInflated);
|
||||||
|
void rtm_inflated_locking(Register obj, Register box, Register tmp,
|
||||||
|
Register scr, Register retry_on_busy_count,
|
||||||
|
Register retry_on_abort_count,
|
||||||
|
RTMLockingCounters* rtm_counters,
|
||||||
|
Metadata* method_data, bool profile_rtm,
|
||||||
|
Label& DONE_LABEL);
|
||||||
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
Condition negate_condition(Condition cond);
|
Condition negate_condition(Condition cond);
|
||||||
@ -721,6 +752,7 @@ class MacroAssembler: public Assembler {
|
|||||||
|
|
||||||
|
|
||||||
void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
|
void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
|
||||||
|
void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
|
||||||
|
|
||||||
|
|
||||||
void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
|
void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
|
||||||
@ -762,7 +794,14 @@ class MacroAssembler: public Assembler {
|
|||||||
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
|
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
|
||||||
void cond_inc32(Condition cond, AddressLiteral counter_addr);
|
void cond_inc32(Condition cond, AddressLiteral counter_addr);
|
||||||
// Unconditional atomic increment.
|
// Unconditional atomic increment.
|
||||||
void atomic_incl(AddressLiteral counter_addr);
|
void atomic_incl(Address counter_addr);
|
||||||
|
void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
|
||||||
|
#ifdef _LP64
|
||||||
|
void atomic_incq(Address counter_addr);
|
||||||
|
void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
|
||||||
|
#endif
|
||||||
|
void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
|
||||||
|
void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
|
||||||
|
|
||||||
void lea(Register dst, AddressLiteral adr);
|
void lea(Register dst, AddressLiteral adr);
|
||||||
void lea(Address dst, AddressLiteral adr);
|
void lea(Address dst, AddressLiteral adr);
|
||||||
@ -1074,7 +1113,11 @@ public:
|
|||||||
|
|
||||||
void movptr(Register dst, Address src);
|
void movptr(Register dst, Address src);
|
||||||
|
|
||||||
void movptr(Register dst, AddressLiteral src);
|
#ifdef _LP64
|
||||||
|
void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
|
||||||
|
#else
|
||||||
|
void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
|
||||||
|
#endif
|
||||||
|
|
||||||
void movptr(Register dst, intptr_t src);
|
void movptr(Register dst, intptr_t src);
|
||||||
void movptr(Register dst, Register src);
|
void movptr(Register dst, Register src);
|
||||||
hotspot/src/cpu/x86/vm/rtmLocking.cpp  (new file, 60 lines)
@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/task.hpp"
+#include "runtime/rtmLocking.hpp"
+
+// One-shot PeriodicTask subclass for enabling RTM locking
+uintx RTMLockingCounters::_calculation_flag = 0;
+
+class RTMLockingCalculationTask : public PeriodicTask {
+ public:
+  RTMLockingCalculationTask(size_t interval_time) : PeriodicTask(interval_time){  }
+
+  virtual void task() {
+    RTMLockingCounters::_calculation_flag = 1;
+    // Reclaim our storage and disenroll ourself
+    delete this;
+  }
+};
+
+void RTMLockingCounters::init() {
+  if (UseRTMLocking && RTMLockingCalculationDelay > 0) {
+    RTMLockingCalculationTask* task = new RTMLockingCalculationTask(RTMLockingCalculationDelay);
+    task->enroll();
+  } else {
+    _calculation_flag = 1;
+  }
+}
+
+//------------------------------print_on-------------------------------
+void RTMLockingCounters::print_on(outputStream* st) {
+  tty->print_cr("# rtm locks total (estimated): " UINTX_FORMAT, _total_count * RTMTotalCountIncrRate);
+  tty->print_cr("# rtm lock aborts  : " UINTX_FORMAT, _abort_count);
+  for (int i = 0; i < ABORT_STATUS_LIMIT; i++) {
+    tty->print_cr("# rtm lock aborts %d: " UINTX_FORMAT, i, _abortX_count[i]);
+  }
+}
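The class above is a one-shot timer: it fires once after RTMLockingCalculationDelay, sets the flag, and deletes itself. A minimal stand-alone sketch of the same pattern, assuming std::thread in place of HotSpot's WatcherThread-driven PeriodicTask (names are illustrative):

    #include <atomic>
    #include <chrono>
    #include <thread>

    std::atomic<unsigned> calculation_flag{0};

    void start_one_shot(unsigned delay_ms) {
      if (delay_ms > 0) {
        std::thread([delay_ms] {
          std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms));
          calculation_flag.store(1, std::memory_order_release);  // enable counting
        }).detach();  // plays the role of "delete this": nothing to join
      } else {
        calculation_flag.store(1, std::memory_order_release);
      }
    }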
@ -1817,6 +1817,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;

+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling JNI
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // Calculate the difference between rsp and rbp,. We need to know it
   // after the native call because on windows Java Natives will pop
   // the arguments and it is painful to do rsp relative addressing
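The __ xabort(0) emitted above is the XABORT instruction: a no-op outside a transaction, but it immediately kills any transaction the caller was speculating in. A large JNI critical section would abort later anyway, so aborting early is cheaper. A user-level sketch with the RTM intrinsics (assumes an RTM-capable CPU and -mrtm with GCC/Clang):

    #include <immintrin.h>

    void before_long_critical_section() {
      // XABORT is a nop when no transaction is active; inside one it
      // aborts with the given 8-bit status code visible to _xbegin().
      _xabort(0);
    }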
@ -3170,6 +3177,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
   };

   address start = __ pc();

+  if (UseRTMLocking) {
+    // Abort RTM transaction before possible nmethod deoptimization.
+    __ xabort(0);
+  }
+
   // Push self-frame.
   __ subptr(rsp, return_off*wordSize); // Epilog!

@ -3355,6 +3368,14 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
   address call_pc = NULL;
   bool cause_return = (poll_type == POLL_AT_RETURN);
   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling runtime
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // If cause_return is true we are at a poll_return and there is
   // the return address on the stack to the caller on the nmethod
   // that is safepoint. We can leave this return on the stack and
@ -2012,6 +2012,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;

+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling JNI
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
 #ifdef ASSERT
   {
     Label L;
@ -3612,6 +3619,11 @@ void SharedRuntime::generate_uncommon_trap_blob() {

   address start = __ pc();

+  if (UseRTMLocking) {
+    // Abort RTM transaction before possible nmethod deoptimization.
+    __ xabort(0);
+  }
+
   // Push self-frame. We get here with a return address on the
   // stack, so rsp is 8-byte aligned until we allocate our frame.
   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
@ -3792,6 +3804,13 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
   bool cause_return = (poll_type == POLL_AT_RETURN);
   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling runtime
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // Make room for return address (or push it again)
   if (!cause_return) {
     __ push(rbx);
@ -50,8 +50,13 @@ int VM_Version::_cpuFeatures;
 const char* VM_Version::_features_str = "";
 VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

+// Address of instruction which causes SEGV
+address VM_Version::_cpuinfo_segv_addr = 0;
+// Address of instruction after the one which causes SEGV
+address VM_Version::_cpuinfo_cont_addr = 0;
+
 static BufferBlob* stub_blob;
-static const int stub_size = 550;
+static const int stub_size = 600;

 extern "C" {
   typedef void (*getPsrInfo_stub_t)(void*);
@ -234,9 +239,9 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     // Check if OS has enabled XGETBV instruction to access XCR0
     // (OSXSAVE feature flag) and CPU supports AVX
     //
-    __ andl(rcx, 0x18000000);
+    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
     __ cmpl(rcx, 0x18000000);
-    __ jccb(Assembler::notEqual, sef_cpuid);
+    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

     //
     // XCR0, XFEATURE_ENABLED_MASK register
@ -247,6 +252,47 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ movl(Address(rsi, 0), rax);
     __ movl(Address(rsi, 4), rdx);

+    __ andl(rax, 0x6); // xcr0 bits sse | ymm
+    __ cmpl(rax, 0x6);
+    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
+
+    //
+    // Some OSs have a bug when upper 128bits of YMM
+    // registers are not restored after a signal processing.
+    // Generate SEGV here (reference through NULL)
+    // and check upper YMM bits after it.
+    //
+    VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
+
+    // load value into all 32 bytes of ymm7 register
+    __ movl(rcx, VM_Version::ymm_test_value());
+
+    __ movdl(xmm0, rcx);
+    __ pshufd(xmm0, xmm0, 0x00);
+    __ vinsertf128h(xmm0, xmm0, xmm0);
+    __ vmovdqu(xmm7, xmm0);
+#ifdef _LP64
+    __ vmovdqu(xmm8, xmm0);
+    __ vmovdqu(xmm15, xmm0);
+#endif
+
+    __ xorl(rsi, rsi);
+    VM_Version::set_cpuinfo_segv_addr( __ pc() );
+    // Generate SEGV
+    __ movl(rax, Address(rsi, 0));
+
+    VM_Version::set_cpuinfo_cont_addr( __ pc() );
+    // Returns here after signal. Save xmm0 to check it later.
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
+    __ vmovdqu(Address(rsi, 0), xmm0);
+    __ vmovdqu(Address(rsi, 32), xmm7);
+#ifdef _LP64
+    __ vmovdqu(Address(rsi, 64), xmm8);
+    __ vmovdqu(Address(rsi, 96), xmm15);
+#endif
+
+    VM_Version::clean_cpuFeatures();
+
     //
     // cpuid(0x7) Structured Extended Features
     //
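The generated stub reads XCR0 via XGETBV and requires both the SSE (bit 1) and YMM (bit 2) state bits before trusting AVX. The same check from ordinary code, assuming a compiler that provides _xgetbv (MSVC, or newer GCC/Clang with xsave support):

    #include <immintrin.h>

    bool os_enabled_avx_state() {
      unsigned long long xcr0 = _xgetbv(0);  // XCR0: OS-enabled state components
      return (xcr0 & 0x6) == 0x6;            // bit 1 = XMM state, bit 2 = YMM state
    }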
@ -429,7 +475,7 @@ void VM_Version::get_processor_features() {
   }

   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
@ -446,8 +492,9 @@ void VM_Version::get_processor_features() {
               (supports_avx()    ? ", avx" : ""),
               (supports_avx2()   ? ", avx2" : ""),
               (supports_aes()    ? ", aes" : ""),
               (supports_clmul()  ? ", clmul" : ""),
               (supports_erms()   ? ", erms" : ""),
+              (supports_rtm()    ? ", rtm" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
               (supports_lzcnt()   ? ", lzcnt": ""),
@ -488,7 +535,7 @@ void VM_Version::get_processor_features() {
     }
   } else if (UseAES) {
     if (!FLAG_IS_DEFAULT(UseAES))
-      warning("AES instructions not available on this CPU");
+      warning("AES instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseAES, false);
   }

@ -521,10 +568,57 @@ void VM_Version::get_processor_features() {
     }
   } else if (UseAESIntrinsics) {
     if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
-      warning("AES intrinsics not available on this CPU");
+      warning("AES intrinsics are not available on this CPU");
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }

+  // Adjust RTM (Restricted Transactional Memory) flags
+  if (!supports_rtm() && UseRTMLocking) {
+    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+    // setting during arguments processing. See use_biased_locking().
+    // VM_Version_init() is executed after UseBiasedLocking is used
+    // in Thread::allocate().
+    vm_exit_during_initialization("RTM instructions are not available on this CPU");
+  }
+
+#if INCLUDE_RTM_OPT
+  if (UseRTMLocking) {
+    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
+      // RTM locking should be used only for applications with
+      // high lock contention. For now we do not use it by default.
+      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
+    }
+    if (!is_power_of_2(RTMTotalCountIncrRate)) {
+      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
+      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
+    }
+    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
+      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
+      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
+    }
+  } else { // !UseRTMLocking
+    if (UseRTMForStackLocks) {
+      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
+        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
+      }
+      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
+    }
+    if (UseRTMDeopt) {
+      FLAG_SET_DEFAULT(UseRTMDeopt, false);
+    }
+    if (PrintPreciseRTMLockingStatistics) {
+      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
+    }
+  }
+#else
+  if (UseRTMLocking) {
+    // Only C2 does RTM locking optimization.
+    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+    // setting during arguments processing. See use_biased_locking().
+    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
+  }
+#endif
+
 #ifdef COMPILER2
   if (UseFPUForSpilling) {
     if (UseSSE < 2) {
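The is_power_of_2 guard on RTMTotalCountIncrRate matters because a power-of-two rate can be applied with a simple mask when sampling. The test itself is the usual single-bit trick:

    #include <cstdint>

    inline bool is_power_of_2(uint64_t x) {
      return x != 0 && (x & (x - 1)) == 0;  // exactly one bit set
    }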
@ -540,14 +634,28 @@ void VM_Version::get_processor_features() {
     if (MaxVectorSize > 32) {
       FLAG_SET_DEFAULT(MaxVectorSize, 32);
     }
-    if (MaxVectorSize > 16 && UseAVX == 0) {
-      // Only supported with AVX+
+    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
+      // 32 bytes vectors (in YMM) are only supported with AVX+
       FLAG_SET_DEFAULT(MaxVectorSize, 16);
     }
     if (UseSSE < 2) {
-      // Only supported with SSE2+
+      // Vectors (in XMM) are only supported with SSE2+
       FLAG_SET_DEFAULT(MaxVectorSize, 0);
     }
+#ifdef ASSERT
+    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
+      tty->print_cr("State of YMM registers after signal handle:");
+      int nreg = 2 LP64_ONLY(+2);
+      const char* ymm_name[4] = {"0", "7", "8", "15"};
+      for (int i = 0; i < nreg; i++) {
+        tty->print("YMM%s:", ymm_name[i]);
+        for (int j = 7; j >=0; j--) {
+          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
+        }
+        tty->cr();
+      }
+    }
+#endif
   }
 #endif

@ -678,14 +786,6 @@ void VM_Version::get_processor_features() {
       }
     }
   }
-#if defined(COMPILER2) && defined(_ALLBSD_SOURCE)
-  if (MaxVectorSize > 16) {
-    // Limit vectors size to 16 bytes on BSD until it fixes
-    // restoring upper 128bit of YMM registers on return
-    // from signal handler.
-    FLAG_SET_DEFAULT(MaxVectorSize, 16);
-  }
-#endif // COMPILER2

   // Use count leading zeros count instruction if available.
   if (supports_lzcnt()) {
@ -814,6 +914,11 @@ void VM_Version::get_processor_features() {
     if (UseAES) {
       tty->print("  UseAES=1");
     }
+#ifdef COMPILER2
+    if (MaxVectorSize > 0) {
+      tty->print("  MaxVectorSize=%d", MaxVectorSize);
+    }
+#endif
     tty->cr();
     tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
@ -856,6 +961,27 @@ void VM_Version::get_processor_features() {
 #endif // !PRODUCT
 }

+bool VM_Version::use_biased_locking() {
+#if INCLUDE_RTM_OPT
+  // RTM locking is most useful when there is high lock contention and
+  // low data contention. With high lock contention the lock is usually
+  // inflated and biased locking is not suitable for that case.
+  // RTM locking code requires that biased locking is off.
+  // Note: we can't switch off UseBiasedLocking in get_processor_features()
+  // because it is used by Thread::allocate() which is called before
+  // VM_Version::initialize().
+  if (UseRTMLocking && UseBiasedLocking) {
+    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+      FLAG_SET_DEFAULT(UseBiasedLocking, false);
+    } else {
+      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
+      UseBiasedLocking = false;
+    }
+  }
+#endif
+  return UseBiasedLocking;
+}
+
 void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
@ -207,7 +207,9 @@ public:
                    : 2,
               bmi2 : 1,
               erms : 1,
-                   : 22;
+                   : 1,
+               rtm : 1,
+                   : 20;
     } bits;
   };

@ -229,6 +231,9 @@ protected:
                               // 0 if this instruction is not available
   static const char* _features_str;

+  static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
+  static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
+
   enum {
     CPU_CX8  = (1 << 0), // next bits are from cpuid 1 (EDX)
     CPU_CMOV = (1 << 1),
@ -254,7 +259,8 @@ protected:
     CPU_ERMS  = (1 << 20), // enhanced 'rep movsb/stosb' instructions
     CPU_CLMUL = (1 << 21), // carryless multiply for CRC
     CPU_BMI1  = (1 << 22),
-    CPU_BMI2  = (1 << 23)
+    CPU_BMI2  = (1 << 23),
+    CPU_RTM   = (1 << 24)  // Restricted Transactional Memory instructions
   } cpuFeatureFlags;

   enum {
@ -361,6 +367,9 @@ protected:
     // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
     XemXcr0Eax xem_xcr0_eax;
     uint32_t   xem_xcr0_edx; // reserved

+    // Space to save ymm registers after signal handle
+    int        ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15
   };

   // The actual cpuid info block
@ -438,6 +447,8 @@ protected:
       result |= CPU_ERMS;
     if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
       result |= CPU_CLMUL;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
+      result |= CPU_RTM;

     // AMD features.
     if (is_amd()) {
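The sef_cpuid7_ebx.bits.rtm field corresponds to CPUID leaf 7, subleaf 0, EBX bit 11. Outside the VM the same probe looks like this with GCC/Clang's <cpuid.h> (__get_cpuid_count is available in newer toolchains):

    #include <cpuid.h>

    bool cpu_supports_rtm() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        return false;                 // CPUID leaf 7 not supported
      }
      return (ebx & (1u << 11)) != 0; // RTM feature bit
    }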
@ -460,6 +471,21 @@ protected:
     return result;
   }

+  static bool os_supports_avx_vectors() {
+    if (!supports_avx()) {
+      return false;
+    }
+    // Verify that OS save/restore all bits of AVX registers
+    // during signal processing.
+    int nreg = 2 LP64_ONLY(+2);
+    for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
+      if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   static void get_processor_features();

 public:
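os_supports_avx_vectors() trusts the ymm_save area filled by the signal probe. For contrast, the broadcast-and-readback step on its own is straightforward with AVX intrinsics (a sketch; compile with -mavx):

    #include <immintrin.h>

    bool ymm_roundtrip_ok() {
      const int marker = (int)0xCAFEBABE;     // same test value as the VM uses
      __m256i v = _mm256_set1_epi32(marker);  // broadcast into all 8 lanes
      alignas(32) int out[8];
      _mm256_store_si256((__m256i*)out, v);
      for (int i = 0; i < 8; i++) {
        if (out[i] != marker) return false;
      }
      return true;
    }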
@ -476,10 +502,26 @@ public:
   static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
   static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
   static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
+  static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
+
+  // The value used to check ymm register after signal handle
+  static int ymm_test_value() { return 0xCAFEBABE; }
+
+  static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
+  static bool is_cpuinfo_segv_addr(address pc)  { return _cpuinfo_segv_addr == pc; }
+  static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
+  static address cpuinfo_cont_addr()            { return _cpuinfo_cont_addr; }
+
+  static void clean_cpuFeatures()   { _cpuFeatures = 0; }
+  static void set_avx_cpuFeatures() { _cpuFeatures = (CPU_SSE | CPU_SSE2 | CPU_AVX); }
+

   // Initialization
   static void initialize();

+  // Override Abstract_VM_Version implementation
+  static bool use_biased_locking();
+
   // Asserts
   static void assert_is_initialized() {
     assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
@ -572,6 +614,7 @@ public:
   static bool supports_aes()   { return (_cpuFeatures & CPU_AES) != 0; }
   static bool supports_erms()  { return (_cpuFeatures & CPU_ERMS) != 0; }
   static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }
+  static bool supports_rtm()   { return (_cpuFeatures & CPU_RTM) != 0; }
   static bool supports_bmi1()  { return (_cpuFeatures & CPU_BMI1) != 0; }
   static bool supports_bmi2()  { return (_cpuFeatures & CPU_BMI2) != 0; }
   // Intel features
@ -474,7 +474,125 @@ reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM

 %}

+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description
+
+source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
+class CallStubImpl {
+
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // NativeCall instruction size is the same as NativeJump.
+    // exception handler starts out as jump and can be patched to
+    // a call be deoptimization.  (4932387)
+    // Note that this value is also credited (in output.cpp) to
+    // the size of the code section.
+    return NativeJump::instruction_size;
+  }
+
+#ifdef _LP64
+  static uint size_deopt_handler() {
+    // three 5 byte instructions
+    return 15;
+  }
+#else
+  static uint size_deopt_handler() {
+    // NativeCall instruction size is the same as NativeJump.
+    // exception handler starts out as jump and can be patched to
+    // a call be deoptimization.  (4932387)
+    // Note that this value is also credited (in output.cpp) to
+    // the size of the code section.
+    return 5 + NativeJump::instruction_size; // pushl(); jmp;
+  }
+#endif
+};
+
+%} // end source_hpp
+
 source %{

+// Emit exception handler code.
+// Stuff framesize into a register and call a VM stub routine.
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base = __ start_a_stub(size_exception_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
+  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+// Emit deopt handler code.
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base = __ start_a_stub(size_deopt_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+
+#ifdef _LP64
+  address the_pc = (address) __ pc();
+  Label next;
+  // push a "the_pc" on the stack without destroying any registers
+  // as they all may be live.
+
+  // push address of "next"
+  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
+  __ bind(next);
+  // adjust it so it matches "the_pc"
+  __ subptr(Address(rsp, 0), __ offset() - offset);
+#else
+  InternalAddress here(__ pc());
+  __ pushptr(here.addr());
+#endif
+
+  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+
+//=============================================================================
+
 // Float masks come from different places depending on platform.
 #ifdef _LP64
 static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
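The call next; bind(next); subptr(Address(rsp, 0), ...) sequence in emit_deopt_handler pushes the current PC without clobbering a register: CALL pushes its own return address, and the stack slot is then adjusted back to "the_pc". In ordinary compiled code, where clobbering a register is fine, the same "what is my PC" effect is a one-instruction RIP-relative lea (GCC/Clang inline-asm sketch, x86_64 only):

    void* current_pc() {
      void* pc;
      asm volatile("lea 0(%%rip), %0" : "=r"(pc));
      return pc;  // address of the instruction following the lea
    }

The handler cannot use this form precisely because all registers may be live at a deoptimization point, which is why it routes the value through the stack instead.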
@ -1297,59 +1297,6 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {


 //=============================================================================
-uint size_exception_handler() {
-  // NativeCall instruction size is the same as NativeJump.
-  // exception handler starts out as jump and can be patched to
-  // a call be deoptimization.  (4932387)
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return NativeJump::instruction_size;
-}
-
-// Emit exception handler code.  Stuff framesize into a register
-// and call a VM stub routine.
-int emit_exception_handler(CodeBuffer& cbuf) {
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
-  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
-uint size_deopt_handler() {
-  // NativeCall instruction size is the same as NativeJump.
-  // exception handler starts out as jump and can be patched to
-  // a call be deoptimization.  (4932387)
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return 5 + NativeJump::instruction_size; // pushl(); jmp;
-}
-
-// Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf) {
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  InternalAddress here(__ pc());
-  __ pushptr(here.addr());
-
-  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
 int Matcher::regnum_to_fpu_offset(int regnum) {
   return regnum - 32; // The FP registers are in the second chunk
@ -12925,13 +12872,31 @@ instruct RethrowException()

 // inlined locking and unlocking

+instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eDXRegI scr, rRegI cx1, rRegI cx2) %{
+  predicate(Compile::current()->use_rtm());
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
+  ins_cost(300);
+  format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, $cx1$$Register, $cx2$$Register,
+                 _counters, _rtm_counters, _stack_rtm_counters,
+                 ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+                 true, ra_->C->profile_rtm());
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
+  predicate(!Compile::current()->use_rtm());
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
   ins_encode %{
-    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
   %}
   ins_pipe(pipe_slow);
 %}

@ -12942,7 +12907,7 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
   ins_cost(300);
   format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
   ins_encode %{
-    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
   %}
   ins_pipe(pipe_slow);
 %}
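cmpFastLockRTM selects fast_lock's RTM path when C2 compiles the method with use_rtm(). Stripped of retries, stack-lock handling, and abort profiling, hardware lock elision follows this shape (a user-level sketch with the RTM intrinsics; requires -mrtm and an RTM-capable CPU):

    #include <immintrin.h>
    #include <atomic>

    // Returns true if the critical section ran inside a hardware transaction;
    // on false the caller must fall back to taking the real lock.
    bool with_rtm_elision(std::atomic<int>& lock_word, void (*critical_section)()) {
      if (_xbegin() == _XBEGIN_STARTED) {
        if (lock_word.load(std::memory_order_relaxed) != 0) {
          _xabort(0xff);            // lock already held: give up speculation
        }
        critical_section();         // lock_word stays in the read-set
        _xend();                    // commit
        return true;
      }
      return false;                 // transaction aborted or never started
    }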
@ -1439,66 +1439,9 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
   return MachNode::size(ra_); // too many variables; just compute it
                               // the hard way
 }


 //=============================================================================
-uint size_exception_handler()
-{
-  // NativeCall instruction size is the same as NativeJump.
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return NativeJump::instruction_size;
-}
-
-// Emit exception handler code.
-int emit_exception_handler(CodeBuffer& cbuf)
-{
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
-  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
-uint size_deopt_handler()
-{
-  // three 5 byte instructions
-  return 15;
-}
-
-// Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf)
-{
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_deopt_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  address the_pc = (address) __ pc();
-  Label next;
-  // push a "the_pc" on the stack without destroying any registers
-  // as they all may be live.
-
-  // push address of "next"
-  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
-  __ bind(next);
-  // adjust it so it matches "the_pc"
-  __ subptr(Address(rsp, 0), __ offset() - offset);
-  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
 int Matcher::regnum_to_fpu_offset(int regnum)
 {
@ -11387,13 +11330,31 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
 // ============================================================================
 // inlined locking and unlocking

+instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rdx_RegI scr, rRegI cx1, rRegI cx2) %{
+  predicate(Compile::current()->use_rtm());
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
+  ins_cost(300);
+  format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, $cx1$$Register, $cx2$$Register,
+                 _counters, _rtm_counters, _stack_rtm_counters,
+                 ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+                 true, ra_->C->profile_rtm());
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
+  predicate(!Compile::current()->use_rtm());
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
   ins_encode %{
-    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
   %}
   ins_pipe(pipe_slow);
 %}

@ -11404,7 +11365,7 @@ instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
   ins_cost(300);
   format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
   ins_encode %{
-    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
   %}
   ins_pipe(pipe_slow);
 %}
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -28,6 +28,6 @@

 #include "os_aix.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.inline.hpp"

 #endif // OS_AIX_VM_MUTEX_AIX_INLINE_HPP
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -61,10 +61,10 @@
 #include "runtime/statSampler.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "services/attachListener.hpp"
 #include "services/runtimeService.hpp"
-#include "thread_aix.inline.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -25,7 +25,7 @@

 #include "precompiled.hpp"
 #include "runtime/threadCritical.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.inline.hpp"

 // put OS-includes here
 # include <pthread.h>
@ -109,6 +109,8 @@

 #define MAX_PATH    (2 * K)

+#define MAX_SECS 100000000
+
 // for timer info max values which include all bits
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

@ -2434,7 +2436,6 @@ class Semaphore : public StackObj {
   sem_t _semaphore;
 };

-
 Semaphore::Semaphore() {
   sem_init(&_semaphore, 0, 0);
 }

@ -2456,8 +2457,22 @@ bool Semaphore::trywait() {
 }

 bool Semaphore::timedwait(unsigned int sec, int nsec) {
   struct timespec ts;
-  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+  // Semaphore's are always associated with CLOCK_REALTIME
+  os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
+  // see unpackTime for discussion on overflow checking
+  if (sec >= MAX_SECS) {
+    ts.tv_sec += MAX_SECS;
+    ts.tv_nsec = 0;
+  } else {
+    ts.tv_sec += sec;
+    ts.tv_nsec += nsec;
+    if (ts.tv_nsec >= NANOSECS_PER_SEC) {
+      ts.tv_nsec -= NANOSECS_PER_SEC;
+      ++ts.tv_sec; // note: this must be <= max_secs
+    }
+  }

   while (1) {
     int result = sem_timedwait(&_semaphore, &ts);

@ -5661,7 +5676,6 @@ void os::PlatformEvent::unpark() {
  * is no need to track notifications.
  */

-#define MAX_SECS 100000000
 /*
  * This code is common to linux and solaris and will be moved to a
  * common place in dolphin.
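The rewritten Semaphore::timedwait computes an absolute CLOCK_REALTIME deadline by hand, since sem_timedwait takes absolute time, and clamps huge waits so tv_sec cannot overflow. The same logic as a stand-alone POSIX sketch:

    #include <semaphore.h>
    #include <time.h>

    #define MAX_SECS 100000000  // clamp as in the patch

    int sem_timedwait_relative(sem_t* sem, unsigned int sec, long nsec) {
      struct timespec ts;
      clock_gettime(CLOCK_REALTIME, &ts);  // sem_timedwait uses CLOCK_REALTIME
      if (sec >= MAX_SECS) {
        ts.tv_sec += MAX_SECS;
        ts.tv_nsec = 0;
      } else {
        ts.tv_sec  += sec;
        ts.tv_nsec += nsec;
        if (ts.tv_nsec >= 1000000000L) {  // normalize into [0, 1e9)
          ts.tv_nsec -= 1000000000L;
          ++ts.tv_sec;
        }
      }
      return sem_timedwait(sem, &ts);
    }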
@ -2425,6 +2425,12 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
     }
   }

+  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
+      VM_Version::is_cpuinfo_segv_addr(pc)) {
+    // Verify that OS save/restore AVX registers.
+    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
+  }
+
   if (t != NULL && t->is_Java_thread()) {
     JavaThread* thread = (JavaThread*) t;
     bool in_java = thread->thread_state() == _thread_in_Java;
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -49,8 +49,8 @@
 #include "runtime/osThread.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
-#include "thread_aix.inline.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #ifdef COMPILER1
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -25,14 +25,14 @@

 #include "precompiled.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.hpp"

 void ThreadLocalStorage::generate_code_for_get_thread() {
-  // nothing we can do here for user-level thread
+  // Nothing we can do here for user-level thread.
 }

 void ThreadLocalStorage::pd_init() {
-  // Nothing to do
+  // Nothing to do.
 }

 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -24,8 +24,8 @@
 */

 #include "precompiled.hpp"
-#include "runtime/frame.inline.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/thread.hpp"

 // Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Aix/PPC.
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
@ -492,6 +492,11 @@ JVM_handle_bsd_signal(int sig,
       }
     }

+    if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) {
+      // Verify that OS save/restore AVX registers.
+      stub = VM_Version::cpuinfo_cont_addr();
+    }
+
     // We test if stub is already set (by the stack overflow code
     // above) so it is not overwritten by the code that follows. This
     // check is not required on other platforms, because on other
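All of these handlers implement the same trick: when the fault PC is the known probe instruction, execution resumes at cpuinfo_cont_addr instead of crashing. On Linux/x86_64 the mechanism is rewriting RIP in the ucontext; a sketch follows (register names and mcontext layout differ per OS, which is why each port gets its own hunk):

    #include <signal.h>
    #include <ucontext.h>

    static void* g_cont_addr;  // instruction after the deliberate faulting load

    static void segv_handler(int sig, siginfo_t* info, void* ucVoid) {
      ucontext_t* uc = (ucontext_t*)ucVoid;
      // Resume after the probe instead of dying; only safe because the
      // faulting instruction and its continuation are known in advance.
      uc->uc_mcontext.gregs[REG_RIP] = (greg_t)g_cont_addr;
    }

    void install_probe_handler() {
      struct sigaction sa = {};
      sa.sa_sigaction = segv_handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, 0);
    }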
@ -1,6 +1,6 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
* Copyright 2012, 2014 SAP AG. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -24,8 +24,8 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
#include "runtime/frame.inline.hpp"
|
#include "runtime/frame.hpp"
|
||||||
#include "thread_linux.inline.hpp"
|
#include "runtime/thread.hpp"
|
||||||
|
|
||||||
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
|
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
|
||||||
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
|
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
|
||||||
|
@ -338,6 +338,11 @@ JVM_handle_linux_signal(int sig,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
|
||||||
|
// Verify that OS save/restore AVX registers.
|
||||||
|
stub = VM_Version::cpuinfo_cont_addr();
|
||||||
|
}
|
||||||
|
|
||||||
if (thread->thread_state() == _thread_in_Java) {
|
if (thread->thread_state() == _thread_in_Java) {
|
||||||
// Java thread running in Java code => find exception handler if any
|
// Java thread running in Java code => find exception handler if any
|
||||||
// a fault inside compiled code, the interpreter, or a stub
|
// a fault inside compiled code, the interpreter, or a stub
|
||||||
@@ -459,6 +459,11 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
 }
 }

+if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
+// Verify that OS save/restore AVX registers.
+stub = VM_Version::cpuinfo_cont_addr();
+}
+
 if (thread->thread_state() == _thread_in_vm) {
 if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
 stub = StubRoutines::handler_for_unsafe_access();
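The three hunks above add the same probe to the BSD, Linux, and Solaris signal handlers: generated code at a known address deliberately faults while testing whether the OS saves and restores AVX registers, and the handler redirects execution to VM_Version::cpuinfo_cont_addr() instead of treating the fault as a crash. Below is a minimal standalone C++ sketch of that fault-and-continue pattern; it uses sigsetjmp/siglongjmp rather than HotSpot's ucontext manipulation, and every name in it is invented for illustration.

// Minimal sketch of the probe-and-continue idea (POSIX, not HotSpot code).
#include <csetjmp>
#include <csignal>
#include <cstdio>

static sigjmp_buf cont_point; // stand-in for VM_Version::cpuinfo_cont_addr()

static void segv_handler(int) {
  siglongjmp(cont_point, 1); // resume at the continuation instead of crashing
}

int main() {
  struct sigaction sa = {};
  sa.sa_handler = segv_handler;
  sigaction(SIGSEGV, &sa, nullptr);

  if (sigsetjmp(cont_point, 1) == 0) {
    *(volatile int*)nullptr = 42; // deliberate fault, like the cpuinfo probe
  } else {
    std::printf("probe faulted; execution continued at the known point\n");
  }
  return 0;
}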
@@ -1582,6 +1582,8 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {

 if( node->is_ideal_fastlock() && new_inst->is_ideal_fastlock() ) {
 fprintf(fp, " ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
+fprintf(fp, " ((MachFastLockNode*)n%d)->_rtm_counters = _rtm_counters;\n",cnt);
+fprintf(fp, " ((MachFastLockNode*)n%d)->_stack_rtm_counters = _stack_rtm_counters;\n",cnt);
 }

 // Fill in the bottom_type where requested
@@ -3963,6 +3965,8 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
 }
 if( inst->is_ideal_fastlock() ) {
 fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
+fprintf(fp_cpp, "%s node->_rtm_counters = _leaf->as_FastLock()->rtm_counters();\n", indent);
+fprintf(fp_cpp, "%s node->_stack_rtm_counters = _leaf->as_FastLock()->stack_rtm_counters();\n", indent);
 }

 }
@@ -2526,7 +2526,7 @@ void LIRGenerator::do_Goto(Goto* x) {
 // need to free up storage used for OSR entry point
 LIR_Opr osrBuffer = block()->next()->operand();
 BasicTypeList signature;
-signature.append(T_INT);
+signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
 __ move(osrBuffer, cc->args()->at(0));
 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
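This change matters because the OSR buffer argument is a pointer: describing it as T_INT truncates it on 64-bit targets, so the signature now appends T_LONG under LP64. A trivial standalone check of the assumption behind the fix:

// Standalone check of the assumption behind the T_INT -> T_LONG change:
// on LP64 platforms a pointer no longer fits in a 32-bit slot.
#include <cstdio>

int main() {
  std::printf("sizeof(void*) = %zu, sizeof(int) = %zu\n",
              sizeof(void*), sizeof(int));
  return 0;
}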
@@ -308,9 +308,6 @@
 develop(intx, InstructionCountCutoff, 37000, \
 "If GraphBuilder adds this many instructions, bails out") \
 \
-product_pd(intx, SafepointPollOffset, \
-"Offset added to polling address (Intel only)") \
-\
 develop(bool, ComputeExactFPURegisterUsage, true, \
 "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
 \
@@ -961,7 +961,8 @@ void ciEnv::register_method(ciMethod* target,
 AbstractCompiler* compiler,
 int comp_level,
 bool has_unsafe_access,
-bool has_wide_vectors) {
+bool has_wide_vectors,
+RTMState rtm_state) {
 VM_ENTRY_MARK;
 nmethod* nm = NULL;
 {
@@ -1002,6 +1003,15 @@ void ciEnv::register_method(ciMethod* target,

 methodHandle method(THREAD, target->get_Method());

+#if INCLUDE_RTM_OPT
+if (!failing() && (rtm_state != NoRTM) &&
+(method()->method_data() != NULL) &&
+(method()->method_data()->rtm_state() != rtm_state)) {
+// Preemptive decompile if rtm state was changed.
+record_failure("RTM state change invalidated rtm code");
+}
+#endif
+
 if (failing()) {
 // While not a true deoptimization, it is a preemptive decompile.
 MethodData* mdo = method()->method_data();
@@ -1028,13 +1038,15 @@ void ciEnv::register_method(ciMethod* target,
 frame_words, oop_map_set,
 handler_table, inc_table,
 compiler, comp_level);

 // Free codeBlobs
 code_buffer->free_blob();

 if (nm != NULL) {
 nm->set_has_unsafe_access(has_unsafe_access);
 nm->set_has_wide_vectors(has_wide_vectors);
+#if INCLUDE_RTM_OPT
+nm->set_rtm_state(rtm_state);
+#endif

 // Record successful registration.
 // (Put nm into the task handle *before* publishing to the Java heap.)
@@ -365,7 +365,8 @@ public:
 AbstractCompiler* compiler,
 int comp_level,
 bool has_unsafe_access,
-bool has_wide_vectors);
+bool has_wide_vectors,
+RTMState rtm_state = NoRTM);


 // Access to certain well known ciObjects.
@@ -478,6 +478,18 @@ public:

 int invocation_count() { return _invocation_counter; }
 int backedge_count() { return _backedge_counter; }

+#if INCLUDE_RTM_OPT
+// return cached value
+int rtm_state() {
+if (is_empty()) {
+return NoRTM;
+} else {
+return get_MethodData()->rtm_state();
+}
+}
+#endif
+
 // Transfer information about the method to MethodData*.
 // would_profile means we would like to profile this method,
 // meaning it's not trivial.
@@ -460,7 +460,9 @@ void nmethod::init_defaults() {
 _scavenge_root_link = NULL;
 _scavenge_root_state = 0;
 _compiler = NULL;
+#if INCLUDE_RTM_OPT
+_rtm_state = NoRTM;
+#endif
 #ifdef HAVE_DTRACE_H
 _trap_offset = 0;
 #endif // def HAVE_DTRACE_H
@@ -193,6 +193,12 @@ class nmethod : public CodeBlob {

 jbyte _scavenge_root_state;

+#if INCLUDE_RTM_OPT
+// RTM state at compile time. Used during deoptimization to decide
+// whether to restart collecting RTM locking abort statistic again.
+RTMState _rtm_state;
+#endif
+
 // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
 // and is not made into a zombie. However, once the nmethod is made into
 // a zombie, it will be locked one final time if CompiledMethodUnload
@@ -414,6 +420,12 @@ class nmethod : public CodeBlob {
 bool is_zombie() const { return _state == zombie; }
 bool is_unloaded() const { return _state == unloaded; }

+#if INCLUDE_RTM_OPT
+// rtm state accessing and manipulating
+RTMState rtm_state() const { return _rtm_state; }
+void set_rtm_state(RTMState state) { _rtm_state = state; }
+#endif
+
 // Make the nmethod non entrant. The nmethod will continue to be
 // alive. It is used when an uncommon trap happens. Returns true
 // if this thread changed the state of the nmethod or false if
@@ -793,53 +793,6 @@ void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 }
 }

-// Apply the given closure to each oop in the space \intersect memory region.
-void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-assert_lock_strong(freelistLock());
-if (is_empty()) {
-return;
-}
-MemRegion cur = MemRegion(bottom(), end());
-mr = mr.intersection(cur);
-if (mr.is_empty()) {
-return;
-}
-if (mr.equals(cur)) {
-oop_iterate(cl);
-return;
-}
-assert(mr.end() <= end(), "just took an intersection above");
-HeapWord* obj_addr = block_start(mr.start());
-HeapWord* t = mr.end();
-
-SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
-if (block_is_obj(obj_addr)) {
-// Handle first object specially.
-oop obj = oop(obj_addr);
-obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
-} else {
-FreeChunk* fc = (FreeChunk*)obj_addr;
-obj_addr += fc->size();
-}
-while (obj_addr < t) {
-HeapWord* obj = obj_addr;
-obj_addr += block_size(obj_addr);
-// If "obj_addr" is not greater than top, then the
-// entire object "obj" is within the region.
-if (obj_addr <= t) {
-if (block_is_obj(obj)) {
-oop(obj)->oop_iterate(cl);
-}
-} else {
-// "obj" extends beyond end of region
-if (block_is_obj(obj)) {
-oop(obj)->oop_iterate(&smr_blk);
-}
-break;
-}
-}
-}
-
 // NOTE: In the following methods, in order to safely be able to
 // apply the closure to an object, we need to be sure that the
 // object has been initialized. We are guaranteed that an object
@@ -351,7 +351,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 Mutex* freelistLock() const { return &_freelistLock; }

 // Iteration support
-void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
 void oop_iterate(ExtendedOopClosure* cl);

 void object_iterate(ObjectClosure* blk);
@@ -3163,16 +3163,6 @@ ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
 cl->reset_generation();
 }

-void
-ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-if (freelistLock()->owned_by_self()) {
-Generation::oop_iterate(mr, cl);
-} else {
-MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-Generation::oop_iterate(mr, cl);
-}
-}
-
 void
 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
 if (freelistLock()->owned_by_self()) {
@@ -1285,7 +1285,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 void save_sweep_limit();

 // More iteration support
-virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
 virtual void oop_iterate(ExtendedOopClosure* cl);
 virtual void safe_object_iterate(ObjectClosure* cl);
 virtual void object_iterate(ObjectClosure* cl);
@@ -2018,8 +2018,8 @@ void ConcurrentMark::cleanup() {
 // that calculated by walking the marking bitmap.

 // Bitmaps to hold expected values
-BitMap expected_region_bm(_region_bm.size(), false);
-BitMap expected_card_bm(_card_bm.size(), false);
+BitMap expected_region_bm(_region_bm.size(), true);
+BitMap expected_card_bm(_card_bm.size(), true);

 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
 &_region_bm,
@@ -550,7 +550,8 @@ bool PSScavenge::invoke_no_policy() {

 if (PrintTenuringDistribution) {
 gclog_or_tty->cr();
-gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
+gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold "
+UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
 size_policy->calculated_survivor_size_in_bytes(),
 _tenuring_threshold, MaxTenuringThreshold);
 }
@@ -829,10 +830,10 @@ GCTaskManager* const PSScavenge::gc_task_manager() {
 void PSScavenge::initialize() {
 // Arguments must have been parsed

-if (AlwaysTenure) {
-_tenuring_threshold = 0;
-} else if (NeverTenure) {
-_tenuring_threshold = markOopDesc::max_age + 1;
+if (AlwaysTenure || NeverTenure) {
+assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
+err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is ", MaxTenuringThreshold));
+_tenuring_threshold = MaxTenuringThreshold;
 } else {
 // We want to smooth out our startup times for the AdaptiveSizePolicy
 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
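With this change, AlwaysTenure and NeverTenure are expressed purely through MaxTenuringThreshold (0 tenures every object at its first collection; markOopDesc::max_age + 1 never tenures), which the new assert documents. A standalone C++ sketch of that mapping, with invented stand-in values:

// Standalone sketch of the new flag handling; all names are stand-ins.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned max_age = 15; // stand-in for markOopDesc::max_age
  bool always_tenure = true, never_tenure = false;
  // Argument parsing is assumed to have set the max threshold accordingly.
  unsigned max_tenuring_threshold = always_tenure ? 0 : max_age + 1;

  unsigned tenuring_threshold;
  if (always_tenure || never_tenure) {
    assert(max_tenuring_threshold == 0 ||
           max_tenuring_threshold == max_age + 1);
    tenuring_threshold = max_tenuring_threshold;
  } else {
    tenuring_threshold = 7; // adaptive startup value, e.g. InitialTenuringThreshold
  }
  std::printf("tenuring threshold = %u\n", tenuring_threshold); // prints 0
  return 0;
}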
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -80,28 +80,37 @@ void ageTable::merge_par(ageTable* subTable) {
|
|||||||
|
|
||||||
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
|
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
|
||||||
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
|
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
|
||||||
size_t total = 0;
|
uint result;
|
||||||
uint age = 1;
|
|
||||||
assert(sizes[0] == 0, "no objects with age zero should be recorded");
|
if (AlwaysTenure || NeverTenure) {
|
||||||
while (age < table_size) {
|
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
|
||||||
total += sizes[age];
|
err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is ", MaxTenuringThreshold));
|
||||||
// check if including objects of age 'age' made us pass the desired
|
result = MaxTenuringThreshold;
|
||||||
// size, if so 'age' is the new threshold
|
} else {
|
||||||
if (total > desired_survivor_size) break;
|
size_t total = 0;
|
||||||
age++;
|
uint age = 1;
|
||||||
|
assert(sizes[0] == 0, "no objects with age zero should be recorded");
|
||||||
|
while (age < table_size) {
|
||||||
|
total += sizes[age];
|
||||||
|
// check if including objects of age 'age' made us pass the desired
|
||||||
|
// size, if so 'age' is the new threshold
|
||||||
|
if (total > desired_survivor_size) break;
|
||||||
|
age++;
|
||||||
|
}
|
||||||
|
result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
|
||||||
}
|
}
|
||||||
uint result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
|
|
||||||
|
|
||||||
if (PrintTenuringDistribution || UsePerfData) {
|
if (PrintTenuringDistribution || UsePerfData) {
|
||||||
|
|
||||||
if (PrintTenuringDistribution) {
|
if (PrintTenuringDistribution) {
|
||||||
gclog_or_tty->cr();
|
gclog_or_tty->cr();
|
||||||
gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
|
gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold "
|
||||||
|
UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
|
||||||
desired_survivor_size*oopSize, result, MaxTenuringThreshold);
|
desired_survivor_size*oopSize, result, MaxTenuringThreshold);
|
||||||
}
|
}
|
||||||
|
|
||||||
total = 0;
|
size_t total = 0;
|
||||||
age = 1;
|
uint age = 1;
|
||||||
while (age < table_size) {
|
while (age < table_size) {
|
||||||
total += sizes[age];
|
total += sizes[age];
|
||||||
if (sizes[age] > 0) {
|
if (sizes[age] > 0) {
|
||||||
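The restructured loop is easiest to see with numbers. The sketch below replays the else-branch on an invented age table: sizes are accumulated by age until the running total first exceeds the desired survivor size, and that age, clamped to MaxTenuringThreshold, becomes the threshold.

// Standalone replay of the threshold loop above; the table is invented.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t sizes[] = {0, 100, 200, 400, 800}; // bytes per age; age 0 unused
  const size_t table_size = sizeof(sizes) / sizeof(sizes[0]);
  const size_t desired_survivor_size = 500;
  const unsigned max_tenuring_threshold = 15;

  size_t total = 0;
  unsigned age = 1;
  while (age < table_size) {
    total += sizes[age];
    if (total > desired_survivor_size) break; // 'age' becomes the threshold
    age++;
  }
  unsigned result = age < max_tenuring_threshold ? age : max_tenuring_threshold;
  std::printf("threshold = %u\n", result); // 3, since 100+200+400 > 500
  return 0;
}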
@@ -837,12 +837,6 @@ void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
 }
 }

-void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-for (int i = 0; i < _n_gens; i++) {
-_gens[i]->oop_iterate(mr, cl);
-}
-}
-
 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
 for (int i = 0; i < _n_gens; i++) {
 _gens[i]->object_iterate(cl);
@@ -212,7 +212,6 @@ public:

 // Iteration functions.
 void oop_iterate(ExtendedOopClosure* cl);
-void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
 void object_iterate(ObjectClosure* cl);
 void safe_object_iterate(ObjectClosure* cl);
 Space* space_containing(const void* addr) const;
@@ -295,22 +295,16 @@ bool Generation::block_is_obj(const HeapWord* p) const {

 class GenerationOopIterateClosure : public SpaceClosure {
 public:
-ExtendedOopClosure* cl;
-MemRegion mr;
+ExtendedOopClosure* _cl;
 virtual void do_space(Space* s) {
-s->oop_iterate(mr, cl);
+s->oop_iterate(_cl);
 }
-GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
-cl(_cl), mr(_mr) {}
+GenerationOopIterateClosure(ExtendedOopClosure* cl) :
+_cl(cl) {}
 };

 void Generation::oop_iterate(ExtendedOopClosure* cl) {
-GenerationOopIterateClosure blk(cl, _reserved);
-space_iterate(&blk);
-}
-
-void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-GenerationOopIterateClosure blk(cl, mr);
+GenerationOopIterateClosure blk(cl);
 space_iterate(&blk);
 }

@@ -543,10 +543,6 @@ class Generation: public CHeapObj<mtGC> {
 // generation, calling "cl.do_oop" on each.
 virtual void oop_iterate(ExtendedOopClosure* cl);

-// Same as above, restricted to the intersection of a memory region and
-// the generation.
-virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
 // Iterate over all objects in the generation, calling "cl.do_object" on
 // each.
 virtual void object_iterate(ObjectClosure* cl);
@@ -3343,37 +3343,22 @@ size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
 }

 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
-if (SafepointSynchronize::is_at_safepoint()) {
-assert(Thread::current()->is_VM_thread(), "should be the VM thread");
-// Don't take Heap_lock
-MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
-// Dark matter. Too small for dictionary.
-#ifdef ASSERT
-Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
-#endif
-return;
-}
-if (is_class && using_class_space()) {
-class_vsm()->deallocate(ptr, word_size);
-} else {
-vsm()->deallocate(ptr, word_size);
-}
-} else {
-MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
+assert(!SafepointSynchronize::is_at_safepoint()
+|| Thread::current()->is_VM_thread(), "should be the VM thread");

-if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
-// Dark matter. Too small for dictionary.
+MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
+
+if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
+// Dark matter. Too small for dictionary.
 #ifdef ASSERT
 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
 #endif
 return;
 }
 if (is_class && using_class_space()) {
 class_vsm()->deallocate(ptr, word_size);
 } else {
 vsm()->deallocate(ptr, word_size);
 }
-}
 }

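The rewrite above is a classic branch merge: both arms of the old safepoint check did identical locked work, so the safepoint condition becomes an assert and a single copy of the body remains. A generic sketch of the shape, using std::mutex stand-ins rather than HotSpot's MutexLockerEx; everything here is invented for illustration:

// Generic sketch of the refactoring shape; names and state are stand-ins.
#include <cassert>
#include <mutex>

static std::mutex lock;
static bool at_safepoint = false; // stand-in for SafepointSynchronize query
static bool is_vm_thread = true;  // stand-in for Thread::current() check

void deallocate_sketch() {
  // Old code: if (at_safepoint) { assert(vm thread); <body> } else { <body> }
  // New code: assert the invariant once, keep one copy of the body.
  assert(!at_safepoint || is_vm_thread);
  std::lock_guard<std::mutex> guard(lock);
  // ... shared deallocation body ...
}

int main() {
  deallocate_sketch();
  return 0;
}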
@@ -163,9 +163,6 @@ public:
 // Iteration functions.
 void oop_iterate(ExtendedOopClosure* cl) = 0;

-// Same as above, restricted to a memory region.
-virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
-
 // Iterate over all spaces in use in the heap, in an undefined order.
 virtual void space_iterate(SpaceClosure* cl) = 0;

@@ -42,9 +42,6 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"

-void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
-void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
-
 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
 HeapWord* top_obj) {
 if (top_obj != NULL) {
@ -686,43 +683,6 @@ void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
|
|
||||||
if (is_empty()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
MemRegion cur = MemRegion(bottom(), top());
|
|
||||||
mr = mr.intersection(cur);
|
|
||||||
if (mr.is_empty()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (mr.equals(cur)) {
|
|
||||||
oop_iterate(blk);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
assert(mr.end() <= top(), "just took an intersection above");
|
|
||||||
HeapWord* obj_addr = block_start(mr.start());
|
|
||||||
HeapWord* t = mr.end();
|
|
||||||
|
|
||||||
// Handle first object specially.
|
|
||||||
oop obj = oop(obj_addr);
|
|
||||||
SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
|
|
||||||
obj_addr += obj->oop_iterate(&smr_blk);
|
|
||||||
while (obj_addr < t) {
|
|
||||||
oop obj = oop(obj_addr);
|
|
||||||
assert(obj->is_oop(), "expected an oop");
|
|
||||||
obj_addr += obj->size();
|
|
||||||
// If "obj_addr" is not greater than top, then the
|
|
||||||
// entire object "obj" is within the region.
|
|
||||||
if (obj_addr <= t) {
|
|
||||||
obj->oop_iterate(blk);
|
|
||||||
} else {
|
|
||||||
// "obj" extends beyond end of region
|
|
||||||
obj->oop_iterate(&smr_blk);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
void ContiguousSpace::object_iterate(ObjectClosure* blk) {
|
void ContiguousSpace::object_iterate(ObjectClosure* blk) {
|
||||||
if (is_empty()) return;
|
if (is_empty()) return;
|
||||||
WaterMark bm = bottom_mark();
|
WaterMark bm = bottom_mark();
|
||||||
@@ -81,31 +81,6 @@ class GenRemSet;
 class CardTableRS;
 class DirtyCardToOopClosure;

-// An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
-private:
-ExtendedOopClosure* _cl;
-MemRegion _mr;
-protected:
-template <class T> void do_oop_work(T* p) {
-if (_mr.contains(p)) {
-_cl->do_oop(p);
-}
-}
-public:
-SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
-_cl(cl), _mr(mr) {}
-virtual void do_oop(oop* p);
-virtual void do_oop(narrowOop* p);
-virtual bool do_metadata() {
-// _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
-assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
-return false;
-}
-virtual void do_klass(Klass* k) { ShouldNotReachHere(); }
-virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
-};
-
 // A Space describes a heap area. Class Space is an abstract
 // base class.
 //
@ -221,11 +196,6 @@ class Space: public CHeapObj<mtGC> {
|
|||||||
// applications of the closure are not included in the iteration.
|
// applications of the closure are not included in the iteration.
|
||||||
virtual void oop_iterate(ExtendedOopClosure* cl);
|
virtual void oop_iterate(ExtendedOopClosure* cl);
|
||||||
|
|
||||||
// Same as above, restricted to the intersection of a memory region and
|
|
||||||
// the space. Fields in objects allocated by applications of the closure
|
|
||||||
// are not included in the iteration.
|
|
||||||
virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
|
|
||||||
|
|
||||||
// Iterate over all objects in the space, calling "cl.do_object" on
|
// Iterate over all objects in the space, calling "cl.do_object" on
|
||||||
// each. Objects allocated by applications of the closure are not
|
// each. Objects allocated by applications of the closure are not
|
||||||
// included in the iteration.
|
// included in the iteration.
|
||||||
@ -866,7 +836,6 @@ class ContiguousSpace: public CompactibleSpace {
|
|||||||
|
|
||||||
// Iteration
|
// Iteration
|
||||||
void oop_iterate(ExtendedOopClosure* cl);
|
void oop_iterate(ExtendedOopClosure* cl);
|
||||||
void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
|
|
||||||
void object_iterate(ObjectClosure* blk);
|
void object_iterate(ObjectClosure* blk);
|
||||||
// For contiguous spaces this method will iterate safely over objects
|
// For contiguous spaces this method will iterate safely over objects
|
||||||
// in the space (i.e., between bottom and top) when at a safepoint.
|
// in the space (i.e., between bottom and top) when at a safepoint.
|
||||||
@@ -273,7 +273,7 @@ int Method::validate_bci_from_bcx(intptr_t bcx) const {
 }

 address Method::bcp_from(int bci) const {
-assert((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size()), "illegal bci");
+assert((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size()), err_msg("illegal bci: %d", bci));
 address bcp = code_base() + bci;
 assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
 return bcp;
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "compiler/compilerOracle.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -1153,6 +1154,21 @@ void MethodData::init() {
 _highest_osr_comp_level = 0;
 _would_profile = true;

+#if INCLUDE_RTM_OPT
+_rtm_state = NoRTM; // No RTM lock eliding by default
+if (UseRTMLocking &&
+!CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
+if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
+// Generate RTM lock eliding code without abort ratio calculation code.
+_rtm_state = UseRTM;
+} else if (UseRTMDeopt) {
+// Generate RTM lock eliding code and include abort ratio calculation
+// code if UseRTMDeopt is on.
+_rtm_state = ProfileRTM;
+}
+}
+#endif
+
 // Initialize flags and trap history.
 _nof_decompiles = 0;
 _nof_overflow_recompiles = 0;
@@ -2052,7 +2052,7 @@ public:

 // Whole-method sticky bits and flags
 enum {
-_trap_hist_limit = 18, // decoupled from Deoptimization::Reason_LIMIT
+_trap_hist_limit = 19, // decoupled from Deoptimization::Reason_LIMIT
 _trap_hist_mask = max_jubyte,
 _extra_data_count = 4 // extra DataLayout headers, for trap history
 }; // Public flag values
@ -2083,6 +2083,12 @@ private:
|
|||||||
// Counter values at the time profiling started.
|
// Counter values at the time profiling started.
|
||||||
int _invocation_counter_start;
|
int _invocation_counter_start;
|
||||||
int _backedge_counter_start;
|
int _backedge_counter_start;
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
// State of RTM code generation during compilation of the method
|
||||||
|
int _rtm_state;
|
||||||
|
#endif
|
||||||
|
|
||||||
// Number of loops and blocks is computed when compiling the first
|
// Number of loops and blocks is computed when compiling the first
|
||||||
// time with C1. It is used to determine if method is trivial.
|
// time with C1. It is used to determine if method is trivial.
|
||||||
short _num_loops;
|
short _num_loops;
|
||||||
@ -2246,6 +2252,22 @@ public:
|
|||||||
InvocationCounter* invocation_counter() { return &_invocation_counter; }
|
InvocationCounter* invocation_counter() { return &_invocation_counter; }
|
||||||
InvocationCounter* backedge_counter() { return &_backedge_counter; }
|
InvocationCounter* backedge_counter() { return &_backedge_counter; }
|
||||||
|
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
int rtm_state() const {
|
||||||
|
return _rtm_state;
|
||||||
|
}
|
||||||
|
void set_rtm_state(RTMState rstate) {
|
||||||
|
_rtm_state = (int)rstate;
|
||||||
|
}
|
||||||
|
void atomic_set_rtm_state(RTMState rstate) {
|
||||||
|
Atomic::store((int)rstate, &_rtm_state);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int rtm_state_offset_in_bytes() {
|
||||||
|
return offset_of(MethodData, _rtm_state);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
void set_would_profile(bool p) { _would_profile = p; }
|
void set_would_profile(bool p) { _would_profile = p; }
|
||||||
bool would_profile() const { return _would_profile; }
|
bool would_profile() const { return _would_profile; }
|
||||||
|
|
||||||
@@ -446,6 +446,9 @@
 diagnostic(bool, PrintPreciseBiasedLockingStatistics, false, \
 "Print per-lock-site statistics of biased locking in JVM") \
 \
+diagnostic(bool, PrintPreciseRTMLockingStatistics, false, \
+"Print per-lock-site statistics of rtm locking in JVM") \
+\
 notproduct(bool, PrintEliminateLocks, false, \
 "Print out when locks are eliminated") \
 \
@@ -198,6 +198,7 @@ macro(NegF)
 macro(NeverBranch)
 macro(Opaque1)
 macro(Opaque2)
+macro(Opaque3)
 macro(OrI)
 macro(OrL)
 macro(OverflowAddI)
@@ -694,9 +694,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
 set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
 set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));

-if (ProfileTraps) {
+if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
 // Make sure the method being compiled gets its own MDO,
 // so we can at least track the decompile_count().
+// Need MDO to record RTM code generation state.
 method()->ensure_method_data();
 }

@@ -907,7 +908,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
 compiler,
 env()->comp_level(),
 has_unsafe_access(),
-SharedRuntime::is_wide_vector(max_vector_size())
+SharedRuntime::is_wide_vector(max_vector_size()),
+rtm_state()
 );

 if (log() != NULL) // Print code cache state into compiler log
@ -1073,7 +1075,23 @@ void Compile::Init(int aliaslevel) {
|
|||||||
set_do_scheduling(OptoScheduling);
|
set_do_scheduling(OptoScheduling);
|
||||||
set_do_count_invocations(false);
|
set_do_count_invocations(false);
|
||||||
set_do_method_data_update(false);
|
set_do_method_data_update(false);
|
||||||
|
set_rtm_state(NoRTM); // No RTM lock eliding by default
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
|
||||||
|
int rtm_state = method()->method_data()->rtm_state();
|
||||||
|
if (method_has_option("NoRTMLockEliding") || ((rtm_state & NoRTM) != 0)) {
|
||||||
|
// Don't generate RTM lock eliding code.
|
||||||
|
set_rtm_state(NoRTM);
|
||||||
|
} else if (method_has_option("UseRTMLockEliding") || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
|
||||||
|
// Generate RTM lock eliding code without abort ratio calculation code.
|
||||||
|
set_rtm_state(UseRTM);
|
||||||
|
} else if (UseRTMDeopt) {
|
||||||
|
// Generate RTM lock eliding code and include abort ratio calculation
|
||||||
|
// code if UseRTMDeopt is on.
|
||||||
|
set_rtm_state(ProfileRTM);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
if (debug_info()->recording_non_safepoints()) {
|
if (debug_info()->recording_non_safepoints()) {
|
||||||
set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
|
set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
|
||||||
(comp_arena(), 8, 0, NULL));
|
(comp_arena(), 8, 0, NULL));
|
||||||
@ -2581,6 +2599,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
|
|||||||
break;
|
break;
|
||||||
case Op_Opaque1: // Remove Opaque Nodes before matching
|
case Op_Opaque1: // Remove Opaque Nodes before matching
|
||||||
case Op_Opaque2: // Remove Opaque Nodes before matching
|
case Op_Opaque2: // Remove Opaque Nodes before matching
|
||||||
|
case Op_Opaque3:
|
||||||
n->subsume_by(n->in(1), this);
|
n->subsume_by(n->in(1), this);
|
||||||
break;
|
break;
|
||||||
case Op_CallStaticJava:
|
case Op_CallStaticJava:
|
||||||
@@ -319,9 +319,9 @@ class Compile : public Phase {
 bool _trace_opto_output;
 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif

 // JSR 292
 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
+RTMState _rtm_state; // State of Restricted Transactional Memory usage

 // Compilation environment.
 Arena _comp_arena; // Arena with lifetime equivalent to Compile
@ -591,6 +591,10 @@ class Compile : public Phase {
|
|||||||
void set_print_inlining(bool z) { _print_inlining = z; }
|
void set_print_inlining(bool z) { _print_inlining = z; }
|
||||||
bool print_intrinsics() const { return _print_intrinsics; }
|
bool print_intrinsics() const { return _print_intrinsics; }
|
||||||
void set_print_intrinsics(bool z) { _print_intrinsics = z; }
|
void set_print_intrinsics(bool z) { _print_intrinsics = z; }
|
||||||
|
RTMState rtm_state() const { return _rtm_state; }
|
||||||
|
void set_rtm_state(RTMState s) { _rtm_state = s; }
|
||||||
|
bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
|
||||||
|
bool profile_rtm() const { return _rtm_state == ProfileRTM; }
|
||||||
// check the CompilerOracle for special behaviours for this compile
|
// check the CompilerOracle for special behaviours for this compile
|
||||||
bool method_has_option(const char * option) {
|
bool method_has_option(const char * option) {
|
||||||
return method() != NULL && method()->has_option(option);
|
return method() != NULL && method()->has_option(option);
|
||||||
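Note that use_rtm() tests a bit rather than comparing for equality, so RTMState must be bit-encoded. The enum itself is not part of this diff; the definition below is an assumption consistent with the predicates shown (use_rtm() false for NoRTM and ProfileRTM, profile_rtm() true only for ProfileRTM):

// Assumed encoding of RTMState; not shown in this diff.
enum RTMState {
  NoRTM      = 0x2, // don't generate RTM locking code
  UseRTM     = 0x1, // generate RTM locking code
  ProfileRTM = 0x3  // RTM with abort ratio calculation (NoRTM bit still set)
};
// With this encoding, (_rtm_state & NoRTM) == 0 holds only for UseRTM,
// so use_rtm() stays false while the method is still in the profiling state.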
@@ -642,6 +642,19 @@ public:
 virtual const Type *bottom_type() const { return TypeInt::INT; }
 };

+//------------------------------Opaque3Node------------------------------------
+// A node to prevent unwanted optimizations. Will be optimized only during
+// macro nodes expansion.
+class Opaque3Node : public Opaque2Node {
+int _opt; // what optimization it was used for
+public:
+enum { RTM_OPT };
+Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
+virtual int Opcode() const;
+bool rtm_opt() const { return (_opt == RTM_OPT); }
+};
+
+
 //----------------------PartialSubtypeCheckNode--------------------------------
 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
 // array for an instance of the superklass. Set a hidden internal cache on a
@@ -1125,6 +1125,17 @@ Node* GraphKit::ConvI2L(Node* offset) {
 }
 return _gvn.transform( new (C) ConvI2LNode(offset));
 }

+Node* GraphKit::ConvI2UL(Node* offset) {
+juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
+if (offset_con != (juint) Type::OffsetBot) {
+return longcon((julong) offset_con);
+}
+Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
+Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
+return _gvn.transform( new (C) AndLNode(conv, mask) );
+}
+
 Node* GraphKit::ConvL2I(Node* offset) {
 // short-circuit a common case
 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
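ConvI2UL exists because the plain int-to-long conversion sign-extends, which is wrong when the 32-bit value is an unsigned offset; masking the converted value with max_juint keeps only the low 32 bits. A standalone demonstration of the difference:

// Why ConvI2UL masks with max_juint: sign- vs zero-extension of 0xFFFFFFFF.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t offset = -1;                                    // bits 0xFFFFFFFF
  int64_t sign_extended = (int64_t)offset;                // -1
  int64_t zero_extended = (int64_t)offset & 0xFFFFFFFFLL; // 4294967295
  std::printf("sign-extended: %lld, zero-extended: %lld\n",
              (long long)sign_extended, (long long)zero_extended);
  return 0;
}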
@ -3151,10 +3162,14 @@ FastLockNode* GraphKit::shared_lock(Node* obj) {
|
|||||||
Node* mem = reset_memory();
|
Node* mem = reset_memory();
|
||||||
|
|
||||||
FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
|
FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
|
||||||
if (PrintPreciseBiasedLockingStatistics) {
|
if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
|
||||||
// Create the counters for this fast lock.
|
// Create the counters for this fast lock.
|
||||||
flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
|
flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create the rtm counters for this fast lock if needed.
|
||||||
|
flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
|
||||||
|
|
||||||
// Add monitor to debug info for the slow path. If we block inside the
|
// Add monitor to debug info for the slow path. If we block inside the
|
||||||
// slow path and de-opt, we need the monitor hanging around
|
// slow path and de-opt, we need the monitor hanging around
|
||||||
map()->push_monitor( flock );
|
map()->push_monitor( flock );
|
||||||
@@ -338,6 +338,7 @@ class GraphKit : public Phase {
 // Convert between int and long, and size_t.
 // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
 Node* ConvI2L(Node* offset);
+Node* ConvI2UL(Node* offset);
 Node* ConvL2I(Node* offset);
 // Find out the klass of an object.
 Node* load_object_klass(Node* object);
@@ -2600,7 +2600,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
 case T_ADDRESS:
 // Cast to an int type.
 p = _gvn.transform(new (C) CastP2XNode(NULL, p));
-p = ConvX2L(p);
+p = ConvX2UL(p);
 break;
 default:
 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
@@ -136,6 +136,8 @@ bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
 //-----------------------------hash--------------------------------------------
 uint FastLockNode::hash() const { return NO_HASH; }

+uint FastLockNode::size_of() const { return sizeof(*this); }
+
 //------------------------------cmp--------------------------------------------
 uint FastLockNode::cmp( const Node &n ) const {
 return (&n == this); // Always fail except on self
@ -159,6 +161,22 @@ void FastLockNode::create_lock_counter(JVMState* state) {
|
|||||||
_counters = blnc->counters();
|
_counters = blnc->counters();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void FastLockNode::create_rtm_lock_counter(JVMState* state) {
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
Compile* C = Compile::current();
|
||||||
|
if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
|
||||||
|
RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
|
||||||
|
OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
|
||||||
|
_rtm_counters = rlnc->counters();
|
||||||
|
if (UseRTMForStackLocks) {
|
||||||
|
rlnc = (RTMLockingNamedCounter*)
|
||||||
|
OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
|
||||||
|
_stack_rtm_counters = rlnc->counters();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
//=============================================================================
|
//=============================================================================
|
||||||
//------------------------------do_monitor_enter-------------------------------
|
//------------------------------do_monitor_enter-------------------------------
|
||||||
void Parse::do_monitor_enter() {
|
void Parse::do_monitor_enter() {
|
||||||
@@ -92,13 +92,17 @@ public:
 //------------------------------FastLockNode-----------------------------------
 class FastLockNode: public CmpNode {
 private:
 BiasedLockingCounters* _counters;
+RTMLockingCounters* _rtm_counters; // RTM lock counters for inflated locks
+RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks

 public:
 FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
 init_req(0,ctrl);
 init_class_id(Class_FastLock);
 _counters = NULL;
+_rtm_counters = NULL;
+_stack_rtm_counters = NULL;
 }
 Node* obj_node() const { return in(1); }
 Node* box_node() const { return in(2); }
@@ -107,13 +111,17 @@ public:
 // FastLock and FastUnlockNode do not hash, we need one for each correspoding
 // LockNode/UnLockNode to avoid creating Phi's.
 virtual uint hash() const ; // { return NO_HASH; }
+virtual uint size_of() const;
 virtual uint cmp( const Node &n ) const ; // Always fail, except on self
 virtual int Opcode() const;
 virtual const Type *Value( PhaseTransform *phase ) const { return TypeInt::CC; }
 const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

 void create_lock_counter(JVMState* s);
+void create_rtm_lock_counter(JVMState* state);
 BiasedLockingCounters* counters() const { return _counters; }
+RTMLockingCounters* rtm_counters() const { return _rtm_counters; }
+RTMLockingCounters* stack_rtm_counters() const { return _stack_rtm_counters; }
 };

@@ -617,6 +617,15 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
 case Op_AryEq: {
 return false;
 }
+#if INCLUDE_RTM_OPT
+case Op_FastLock:
+case Op_FastUnlock: {
+// Don't unroll RTM locking code because it is large.
+if (UseRTMLocking) {
+return false;
+}
+}
+#endif
 } // switch
 }

@ -722,6 +731,15 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
|
|||||||
// String intrinsics are large and have loops.
|
// String intrinsics are large and have loops.
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
case Op_FastLock:
|
||||||
|
case Op_FastUnlock: {
|
||||||
|
// Don't unroll RTM locking code because it is large.
|
||||||
|
if (UseRTMLocking) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
} // switch
|
} // switch
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,6 +53,7 @@ class MachSpillCopyNode;
 class Matcher;
 class PhaseRegAlloc;
 class RegMask;
+class RTMLockingCounters;
 class State;

 //---------------------------MachOper------------------------------------------
@ -714,8 +715,9 @@ public:
|
|||||||
class MachFastLockNode : public MachNode {
|
class MachFastLockNode : public MachNode {
|
||||||
virtual uint size_of() const { return sizeof(*this); } // Size is bigger
|
virtual uint size_of() const { return sizeof(*this); } // Size is bigger
|
||||||
public:
|
public:
|
||||||
BiasedLockingCounters* _counters;
|
BiasedLockingCounters* _counters;
|
||||||
|
RTMLockingCounters* _rtm_counters; // RTM lock counters for inflated locks
|
||||||
|
RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks
|
||||||
MachFastLockNode() : MachNode() {}
|
MachFastLockNode() : MachNode() {}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -2439,6 +2439,7 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
 }
 }
 // Next, attempt to eliminate allocations
+_has_locks = false;
 progress = true;
 while (progress) {
 progress = false;
@@ -2457,11 +2458,13 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
 case Node::Class_Lock:
 case Node::Class_Unlock:
 assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
+_has_locks = true;
 break;
 default:
 assert(n->Opcode() == Op_LoopLimit ||
 n->Opcode() == Op_Opaque1 ||
-n->Opcode() == Op_Opaque2, "unknown node type in macro list");
+n->Opcode() == Op_Opaque2 ||
+n->Opcode() == Op_Opaque3, "unknown node type in macro list");
 }
 assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
 progress = progress || success;
@ -2502,6 +2505,30 @@ bool PhaseMacroExpand::expand_macro_nodes() {
|
|||||||
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
|
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
|
||||||
_igvn.replace_node(n, n->in(1));
|
_igvn.replace_node(n, n->in(1));
|
||||||
success = true;
|
success = true;
|
||||||
|
#if INCLUDE_RTM_OPT
|
||||||
|
} else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
|
||||||
|
assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
|
||||||
|
assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
|
||||||
|
Node* cmp = n->unique_out();
|
||||||
|
#ifdef ASSERT
|
||||||
|
// Validate graph.
|
||||||
|
assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
|
||||||
|
BoolNode* bol = cmp->unique_out()->as_Bool();
|
||||||
|
assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
|
||||||
|
(bol->_test._test == BoolTest::ne), "");
|
||||||
|
IfNode* ifn = bol->unique_out()->as_If();
|
||||||
|
assert((ifn->outcnt() == 2) &&
|
||||||
|
ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change), "");
|
||||||
|
#endif
|
||||||
|
Node* repl = n->in(1);
|
||||||
|
if (!_has_locks) {
|
||||||
|
// Remove RTM state check if there are no locks in the code.
|
||||||
|
// Replace input to compare the same value.
|
||||||
|
repl = (cmp->in(1) == n) ? cmp->in(2) : cmp->in(1);
|
||||||
|
}
|
||||||
|
_igvn.replace_node(n, repl);
|
||||||
|
success = true;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
|
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
|
||||||
progress = progress || success;
|
progress = progress || success;
|
||||||
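
Note on the Opaque3 expansion above: the parser wraps the RTM state load in an Opaque3 node so IGVN cannot fold the comparison early; expand_macro_nodes() then either keeps the real load (locks present) or rewires the compare so the check folds away (no locks). A minimal standalone sketch of that replacement rule, with illustrative names (Expr, replacement_for) that are not HotSpot code:

// Standalone sketch, not HotSpot code: an "opaque" placeholder feeding one
// input of a compare is replaced by its real input when the check must stay,
// or by the compare's other input so the compare folds and the guarded
// branch disappears.
#include <cassert>

struct Expr { Expr* in[3]; };                 // tiny IR node: in[1], in[2] used

// Returns the node that should replace the opaque placeholder 'opq',
// which is known to feed compare node 'cmp'.
static Expr* replacement_for(Expr* opq, Expr* cmp, bool has_locks) {
  Expr* repl = opq->in[1];                    // default: keep the real value
  if (!has_locks) {
    // No locks anywhere: make both compare inputs identical so the
    // RTM-state check folds away.
    repl = (cmp->in[1] == opq) ? cmp->in[2] : cmp->in[1];
  }
  return repl;
}

int main() {
  Expr load{}, profile_state{}, opq{}, cmp{};
  opq.in[1] = &load;
  cmp.in[1] = &opq; cmp.in[2] = &profile_state;
  assert(replacement_for(&opq, &cmp, true)  == &load);           // check kept
  assert(replacement_for(&opq, &cmp, false) == &profile_state);  // check folds
}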
@ -76,6 +76,8 @@ private:
   ProjNode *_memproj_catchall;
   ProjNode *_resproj;
 
+  // Additional data collected during macro expansion
+  bool _has_locks;
+
   void expand_allocate(AllocateNode *alloc);
   void expand_allocate_array(AllocateArrayNode *alloc);
@ -118,7 +120,7 @@ private:
                       Node* length);
 
 public:
-  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
+  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) {
     _igvn.set_delay_transform(true);
   }
   void eliminate_macro_nodes();
@ -42,18 +42,12 @@
 #include "runtime/handles.inline.hpp"
 #include "utilities/xmlstream.hpp"
 
-extern uint size_exception_handler();
-extern uint size_deopt_handler();
-
 #ifndef PRODUCT
 #define DEBUG_ARG(x) , x
 #else
 #define DEBUG_ARG(x)
 #endif
 
-extern int emit_exception_handler(CodeBuffer &cbuf);
-extern int emit_deopt_handler(CodeBuffer &cbuf);
-
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
@ -394,6 +388,11 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
         reloc_size += mach->reloc();
         if (mach->is_MachCall()) {
+          // add size information for trampoline stub
+          // class CallStubImpl is platform-specific and defined in the *.ad files.
+          stub_size  += CallStubImpl::size_call_trampoline();
+          reloc_size += CallStubImpl::reloc_call_trampoline();
+
           MachCallNode *mcall = mach->as_MachCall();
           // This destination address is NOT PC-relative
 
@ -1133,10 +1132,9 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
   shorten_branches(blk_starts, code_req, locs_req, stub_req);
 
   // nmethod and CodeBuffer count stubs & constants as part of method's code.
-  int exception_handler_req = size_exception_handler();
-  int deopt_handler_req = size_deopt_handler();
-  exception_handler_req += MAX_stubs_size; // add marginal slop for handler
-  deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
+  // class HandlerImpl is platform-specific and defined in the *.ad files.
+  int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
+  int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
   stub_req += MAX_stubs_size;  // ensure per-stub margin
   code_req += MAX_inst_size;   // ensure per-instruction margin
 
@ -1622,17 +1620,18 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
 
   // Only java methods have exception handlers and deopt handlers
+  // class HandlerImpl is platform-specific and defined in the *.ad files.
   if (_method) {
     // Emit the exception handler code.
-    _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
+    _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
     // Emit the deopt handler code.
-    _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
+    _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
 
     // Emit the MethodHandle deopt handler code (if required).
     if (has_method_handle_invokes()) {
       // We can use the same code as for the normal deopt handler, we
       // just need a different entry point address.
-      _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
+      _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
     }
   }
 
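
Note on the HandlerImpl change above: the loose extern declarations (size_exception_handler, emit_exception_handler, and friends) are replaced by static members of a platform-specific HandlerImpl class defined in the *.ad files. A minimal standalone sketch of the pattern, with hypothetical names (Buffer, HandlerImplSketch) that are not the actual HotSpot declarations:

// Standalone sketch: free functions declared 'extern' and defined per
// platform are folded into a class of static members, so each platform
// supplies one HandlerImpl-style type instead of a set of loose symbols.
#include <cstdio>

struct Buffer { int pos; };

struct HandlerImplSketch {              // defined once per platform
  static int size_exception_handler() { return 16; }
  static int emit_exception_handler(Buffer& b) {
    return b.pos += size_exception_handler();   // pretend to emit code
  }
};

int main() {
  Buffer cb{0};
  std::printf("handler ends at %d\n", HandlerImplSketch::emit_exception_handler(cb));
}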
@ -486,6 +486,8 @@ class Parse : public GraphKit {
   // Helper function to compute array addressing
   Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);
 
+  void rtm_deopt();
+
   // Pass current map to exits
   void return_current(Node* value);
 
@ -567,6 +567,10 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Pars
     set_map(entry_map);
     do_method_entry();
   }
+  if (depth() == 1) {
+    // Add check to deoptimize the nmethod if RTM state was changed
+    rtm_deopt();
+  }
 
   // Check for bailouts during method entry.
   if (failing()) {
@ -2006,6 +2010,42 @@ void Parse::call_register_finalizer() {
   set_control( _gvn.transform(result_rgn) );
 }
 
+// Add check to deoptimize if RTM state is not ProfileRTM
+void Parse::rtm_deopt() {
+#if INCLUDE_RTM_OPT
+  if (C->profile_rtm()) {
+    assert(C->method() != NULL, "only for normal compilations");
+    assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
+    assert(depth() == 1, "generate check only for main compiled method");
+
+    // Set starting bci for uncommon trap.
+    set_parse_bci(is_osr_parse() ? osr_bci() : 0);
+
+    // Load the rtm_state from the MethodData.
+    const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
+    Node* mdo = makecon(adr_type);
+    int offset = MethodData::rtm_state_offset_in_bytes();
+    Node* adr_node = basic_plus_adr(mdo, mdo, offset);
+    Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+
+    // Separate Load from Cmp by Opaque.
+    // In expand_macro_nodes() it will be replaced either
+    // with this load when there are locks in the code
+    // or with ProfileRTM (cmp->in(2)) otherwise so that
+    // the check will fold.
+    Node* profile_state = makecon(TypeInt::make(ProfileRTM));
+    Node* opq = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
+    Node* chk = _gvn.transform( new (C) CmpINode(opq, profile_state) );
+    Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
+    // Branch to failure if state was changed
+    { BuildCutout unless(this, tst, PROB_ALWAYS);
+      uncommon_trap(Deoptimization::Reason_rtm_state_change,
+                    Deoptimization::Action_make_not_entrant);
+    }
+  }
+#endif
+}
+
 //------------------------------return_current---------------------------------
 // Append current _map to _exit_return
 void Parse::return_current(Node* value) {
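
Note on Parse::rtm_deopt() above: for a depth-1 compile with RTM profiling, the generated code loads rtm_state from the MethodData, compares it with ProfileRTM, and takes an uncommon trap (Reason_rtm_state_change, Action_make_not_entrant) when the state has changed. A standalone model of that guard, with illustrative names and enum values, not HotSpot code:

// Standalone sketch: on entry to "compiled" code, re-check a profile state
// recorded in the method's data; if it no longer equals the value the code
// was compiled against, bail out. recompile-by-trap is modeled as a print.
#include <atomic>
#include <cstdio>

enum RTMStateSketch { ProfileRTM = 0, UseRTM = 1, NoRTM = 2 };  // illustrative values
std::atomic<int> method_rtm_state{ProfileRTM};  // lives in the MethodData

void entry_guard() {
  if (method_rtm_state.load(std::memory_order_relaxed) != ProfileRTM) {
    std::puts("state changed: deoptimize (Action_make_not_entrant)");
    return;                       // would re-enter via the interpreter
  }
  std::puts("state unchanged: keep running compiled code");
}

int main() {
  entry_guard();                  // unchanged
  method_rtm_state.store(UseRTM);
  entry_guard();                  // triggers the bail-out path
}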
@ -1310,6 +1310,14 @@ void OptoRuntime::print_named_counters() {
         tty->print_cr("%s", c->name());
         blc->print_on(tty);
       }
+#if INCLUDE_RTM_OPT
+    } else if (c->tag() == NamedCounter::RTMLockingCounter) {
+      RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
+      if (rlc->nonzero()) {
+        tty->print_cr("%s", c->name());
+        rlc->print_on(tty);
+      }
+#endif
     }
     c = c->next();
   }
@ -1349,6 +1357,8 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
   NamedCounter* c;
   if (tag == NamedCounter::BiasedLockingCounter) {
     c = new BiasedLockingNamedCounter(strdup(st.as_string()));
+  } else if (tag == NamedCounter::RTMLockingCounter) {
+    c = new RTMLockingNamedCounter(strdup(st.as_string()));
   } else {
     c = new NamedCounter(strdup(st.as_string()), tag);
   }
@ -1357,6 +1367,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
   // add counters so this is safe.
   NamedCounter* head;
   do {
+    c->set_next(NULL);
     head = _named_counters;
     c->set_next(head);
   } while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head);
@ -29,6 +29,7 @@
 #include "opto/machnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/rtmLocking.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/vframe.hpp"
 
@ -61,7 +62,8 @@ public:
     NoTag,
     LockCounter,
     EliminatedLockCounter,
-    BiasedLockingCounter
+    BiasedLockingCounter,
+    RTMLockingCounter
   };
 
 private:
@ -85,7 +87,7 @@ private:
 
   NamedCounter* next() const { return _next; }
   void set_next(NamedCounter* next) {
-    assert(_next == NULL, "already set");
+    assert(_next == NULL || next == NULL, "already set");
     _next = next;
   }
 
@ -102,6 +104,18 @@ class BiasedLockingNamedCounter : public NamedCounter {
   BiasedLockingCounters* counters() { return &_counters; }
 };
 
+
+class RTMLockingNamedCounter : public NamedCounter {
+ private:
+  RTMLockingCounters _counters;
+
+ public:
+  RTMLockingNamedCounter(const char *n) :
+    NamedCounter(n, RTMLockingCounter), _counters() {}
+
+  RTMLockingCounters* counters() { return &_counters; }
+};
+
 typedef const TypeFunc*(*TypeFunc_generator)();
 
 class OptoRuntime : public AllStatic {
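
Note on the c->set_next(NULL) line above: new_named_counter() prepends to a global list with a CAS retry loop, so a counter that lost a race must be re-linked; that is why the node's next pointer is now reset on every iteration, and why set_next() was relaxed to also accept a NULL argument. A standalone sketch of the same lock-free prepend, using std::atomic in place of HotSpot's Atomic::cmpxchg_ptr:

// Standalone sketch of the lock-free list prepend (a Treiber-stack push).
#include <atomic>

struct Counter {
  const char* name;
  Counter* next = nullptr;
};

std::atomic<Counter*> counters_head{nullptr};

void prepend(Counter* c) {
  Counter* head;
  do {
    c->next = nullptr;                 // reset before (re)linking, as in the diff
    head = counters_head.load();
    c->next = head;                    // link to the current head
  } while (!counters_head.compare_exchange_weak(head, c));
}

int main() {
  static Counter a{"a"}, b{"b"};
  prepend(&a);
  prepend(&b);                         // list is now b -> a
}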
@ -4380,7 +4380,7 @@ const Type *TypeMetadataPtr::xmeet( const Type *t ) const {
     // else fall through:
     case TopPTR:
     case AnyNull: {
-      return make(ptr, NULL, offset);
+      return make(ptr, _metadata, offset);
     }
     case BotPTR:
     case NotNull:
@ -1716,6 +1716,7 @@ inline bool Type::is_ptr_to_boxing_obj() const {
 #define ConvL2X(x)   (x)
 #define ConvX2I(x)   ConvL2I(x)
 #define ConvX2L(x)   (x)
+#define ConvX2UL(x)  (x)
 
 #else
 
@ -1760,6 +1761,7 @@ inline bool Type::is_ptr_to_boxing_obj() const {
 #define ConvL2X(x)  ConvL2I(x)
 #define ConvX2I(x)  (x)
 #define ConvX2L(x)  ConvI2L(x)
+#define ConvX2UL(x) ConvI2UL(x)
 
 #endif
 
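
Note on ConvX2UL above: the ConvX2* macros abstract over the machine-word type, so the new conversion is the identity on 64-bit and a zero-extending int-to-unsigned-long conversion on 32-bit. The HotSpot macros expand to C2 IR conversion nodes, not casts; the plain C++ stand-in below only illustrates why zero extension (not sign extension) is the point:

// Standalone illustration of the 32-bit case of ConvX2UL.
#include <cstdint>
#include <cassert>

static uint64_t conv_i2ul(int32_t x) {        // zero-extend, not sign-extend
  return (uint64_t)(uint32_t)x;
}

int main() {
  assert(conv_i2ul(-1) == 0xFFFFFFFFu);       // not 0xFFFFFFFFFFFFFFFF
}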
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -500,6 +500,54 @@ WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
   c = *p;
 WB_END
 
+WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o))
+  const char* cpu_features = VM_Version::cpu_features();
+  ThreadToNativeFromVM ttn(thread);
+  jstring features_string = env->NewStringUTF(cpu_features);
+
+  CHECK_JNI_EXCEPTION_(env, NULL);
+
+  return features_string;
+WB_END
+
+
+WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
+  ResourceMark rm(THREAD);
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
+  jobjectArray result = NULL;
+  if (code == NULL) {
+    return result;
+  }
+  int insts_size = code->insts_size();
+
+  ThreadToNativeFromVM ttn(thread);
+  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  result = env->NewObjectArray(2, clazz, NULL);
+  if (result == NULL) {
+    return result;
+  }
+
+  clazz = env->FindClass(vmSymbols::java_lang_Integer()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jmethodID constructor = env->GetMethodID(clazz, vmSymbols::object_initializer_name()->as_C_string(), vmSymbols::int_void_signature()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jobject obj = env->NewObject(clazz, constructor, code->comp_level());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetObjectArrayElement(result, 0, obj);
+
+  jbyteArray insts = env->NewByteArray(insts_size);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
+  env->SetObjectArrayElement(result, 1, insts);
+
+  return result;
+WB_END
+
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
                                Symbol* signature_symbol) {
@ -611,6 +659,9 @@ static JNINativeMethod methods[] = {
   {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   {CC"fullGC",             CC"()V",                   (void*)&WB_FullGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
+  {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
+  {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
+                                                      (void*)&WB_GetNMethod         },
 };
 
 #undef CC
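
Note on WB_GetNMethod above: after switching to native state (ThreadToNativeFromVM), every JNI call that can raise an exception is followed by CHECK_JNI_EXCEPTION_ before its result is used. A generic JNI fragment showing the same discipline when boxing an int into java.lang.Integer; it compiles against jni.h and assumes a valid JNIEnv supplied by a running VM (so it is a fragment, not a standalone program):

// Generic JNI sketch, independent of WhiteBox: check for a pending
// exception after each call that can throw, before using its result.
#include <jni.h>

static jobject box_int(JNIEnv* env, jint value) {
  jclass clazz = env->FindClass("java/lang/Integer");
  if (env->ExceptionCheck()) return nullptr;
  jmethodID ctor = env->GetMethodID(clazz, "<init>", "(I)V");
  if (env->ExceptionCheck()) return nullptr;
  jobject boxed = env->NewObject(clazz, ctor, value);
  if (env->ExceptionCheck()) return nullptr;
  return boxed;
}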
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,6 @@
   do {                                                                \
     JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
     if (HAS_PENDING_EXCEPTION) {                                      \
-      CLEAR_PENDING_EXCEPTION;                                        \
       return(value);                                                  \
     }                                                                 \
   } while (0)
@ -49,7 +48,6 @@
   do {                                                                \
     JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
     if (HAS_PENDING_EXCEPTION) {                                      \
-      CLEAR_PENDING_EXCEPTION;                                        \
      return;                                                          \
     }                                                                 \
   } while (0)
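
Note on the CHECK_JNI_EXCEPTION_ change above: the macros still return early when an exception is pending, but they no longer clear it, so the exception now propagates to the Java caller instead of being silently swallowed. A simplified stand-in (a plain bool models the pending-exception state, which is not how HotSpot tracks it):

// Standalone sketch of "bail out early but leave the exception pending".
#include <cstdio>

static bool pending = false;           // stands in for HAS_PENDING_EXCEPTION

#define CHECK_EXCEPTION_SKETCH_(value)                                    \
  do {                                                                    \
    if (pending) {                                                        \
      /* old code also did: pending = false; (CLEAR_PENDING_EXCEPTION) */ \
      return (value);                                                     \
    }                                                                     \
  } while (0)

static int risky() {
  pending = true;                      // some call failed
  CHECK_EXCEPTION_SKETCH_(-1);
  return 0;
}

int main() {
  std::printf("risky() = %d, exception still pending: %d\n", risky(), pending);
}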
@ -301,6 +301,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
   { "UseMPSS",             JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseStringCache",      JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseOldInlining",      JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "SafepointPollOffset", JDK_Version::jdk(9), JDK_Version::jdk(10) },
 #ifdef PRODUCT
   { "DesiredMethodLimit",
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@ -1186,11 +1187,6 @@ void Arguments::set_parnew_gc_flags() {
     FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
   }
 
-  // AlwaysTenure flag should make ParNew promote all at first collection.
-  // See CR 6362902.
-  if (AlwaysTenure) {
-    FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
-  }
   // When using compressed oops, we use local overflow stacks,
   // rather than using a global overflow list chained through
   // the klass word of the object's pre-image.
@ -2343,10 +2339,8 @@ bool Arguments::check_vm_args_consistency() {
   status = status && verify_percentage(YoungGenerationSizeSupplement, "YoungGenerationSizeSupplement");
   status = status && verify_percentage(TenuredGenerationSizeSupplement, "TenuredGenerationSizeSupplement");
 
-  // the "age" field in the oop header is 4 bits; do not want to pull in markOop.hpp
-  // just for that, so hardcode here.
-  status = status && verify_interval(MaxTenuringThreshold, 0, 15, "MaxTenuringThreshold");
-  status = status && verify_interval(InitialTenuringThreshold, 0, MaxTenuringThreshold, "MaxTenuringThreshold");
+  status = status && verify_interval(MaxTenuringThreshold, 0, markOopDesc::max_age + 1, "MaxTenuringThreshold");
+  status = status && verify_interval(InitialTenuringThreshold, 0, MaxTenuringThreshold, "InitialTenuringThreshold");
   status = status && verify_percentage(TargetSurvivorRatio, "TargetSurvivorRatio");
   status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio");
 
@ -3072,14 +3066,31 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       // but disallow DR and offlining (5008695).
       FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true);
 
+    // Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure;
+    // and the last option wins.
     } else if (match_option(option, "-XX:+NeverTenure", &tail)) {
-      // The last option must always win.
-      FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
       FLAG_SET_CMDLINE(bool, NeverTenure, true);
+      FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
+      FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1);
     } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) {
-      // The last option must always win.
       FLAG_SET_CMDLINE(bool, NeverTenure, false);
       FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
+      FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
+    } else if (match_option(option, "-XX:MaxTenuringThreshold=", &tail)) {
+      uintx max_tenuring_thresh = 0;
+      if(!parse_uintx(tail, &max_tenuring_thresh, 0)) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid MaxTenuringThreshold: %s\n", option->optionString);
+      }
+      FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, max_tenuring_thresh);
+
+      if (MaxTenuringThreshold == 0) {
+        FLAG_SET_CMDLINE(bool, NeverTenure, false);
+        FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
+      } else {
+        FLAG_SET_CMDLINE(bool, NeverTenure, false);
+        FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
+      }
     } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled", &tail) ||
                match_option(option, "-XX:-CMSPermGenSweepingEnabled", &tail)) {
       jio_fprintf(defaultStream::error_stream(),
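
Note on the tenuring options above: MaxTenuringThreshold, AlwaysTenure and NeverTenure are now kept mutually consistent, and the last option on the command line wins; in particular, -XX:MaxTenuringThreshold=0 now implies AlwaysTenure. A standalone model of the rule, where max_age stands in for markOopDesc::max_age (the limit of the 4-bit "age" field in the object header):

// Standalone sketch of the flag-consistency rule.
#include <cassert>

const unsigned max_age = 15;
struct Flags { bool always_tenure, never_tenure; unsigned max_tenuring_threshold; };

void set_max_tenuring_threshold(Flags& f, unsigned n) {
  f.max_tenuring_threshold = n;
  f.never_tenure = false;
  f.always_tenure = (n == 0);     // threshold 0 means: promote immediately
}

int main() {
  Flags f{};
  set_max_tenuring_threshold(f, 0);
  assert(f.always_tenure && !f.never_tenure);
  set_max_tenuring_threshold(f, 6);
  assert(!f.always_tenure && !f.never_tenure && f.max_tenuring_threshold == 6);
}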
@ -3779,9 +3790,6 @@ jint Arguments::apply_ergo() {
|
|||||||
#endif // CC_INTERP
|
#endif // CC_INTERP
|
||||||
|
|
||||||
#ifdef COMPILER2
|
#ifdef COMPILER2
|
||||||
if (!UseBiasedLocking || EmitSync != 0) {
|
|
||||||
UseOptoBiasInlining = false;
|
|
||||||
}
|
|
||||||
if (!EliminateLocks) {
|
if (!EliminateLocks) {
|
||||||
EliminateNestedLocks = false;
|
EliminateNestedLocks = false;
|
||||||
}
|
}
|
||||||
@ -3842,6 +3850,11 @@ jint Arguments::apply_ergo() {
|
|||||||
UseBiasedLocking = false;
|
UseBiasedLocking = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#ifdef COMPILER2
|
||||||
|
if (!UseBiasedLocking || EmitSync != 0) {
|
||||||
|
UseOptoBiasInlining = false;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
return JNI_OK;
|
return JNI_OK;
|
||||||
}
|
}
|
||||||
@ -1288,7 +1288,8 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
   gather_statistics(reason, action, trap_bc);
 
   // Ensure that we can record deopt. history:
-  bool create_if_missing = ProfileTraps;
+  // Need MDO to record RTM code generation state.
+  bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking );
 
   MethodData* trap_mdo =
     get_method_data(thread, trap_method, create_if_missing);
@ -1569,6 +1570,17 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
         if (tstate1 != tstate0)
           pdata->set_trap_state(tstate1);
       }
+
+#if INCLUDE_RTM_OPT
+      // Restart collecting RTM locking abort statistic if the method
+      // is recompiled for a reason other than RTM state change.
+      // Assume that in new recompiled code the statistic could be different,
+      // for example, due to different inlining.
+      if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
+          UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) {
+        trap_mdo->atomic_set_rtm_state(ProfileRTM);
+      }
+#endif
     }
 
     if (inc_recompile_count) {
@ -1826,7 +1838,8 @@ const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
   "age",
   "predicate",
   "loop_limit_check",
-  "speculate_class_check"
+  "speculate_class_check",
+  "rtm_state_change"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
   // Note: Keep this in sync. with enum DeoptAction.
@ -60,6 +60,7 @@ class Deoptimization : AllStatic {
   Reason_predicate,             // compiler generated predicate failed
   Reason_loop_limit_check,      // compiler generated loop limits check failed
   Reason_speculate_class_check, // saw unexpected object class from type speculation
+  Reason_rtm_state_change,      // rtm state change detected
   Reason_LIMIT,
   // Note: Keep this enum in sync. with _trap_reason_name.
   Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc
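
Note on Reason_rtm_state_change above: the new enum entry and the "rtm_state_change" string must stay in sync by hand, per the "Keep this in sync" comments in the source. A standalone sketch of how such a pairing can be checked at compile time instead:

// Standalone sketch: tie the name table's length to the enum's limit.
enum ReasonSketch { Reason_age, Reason_predicate, Reason_loop_limit_check,
                    Reason_speculate_class_check, Reason_rtm_state_change,
                    Reason_LIMIT };

static const char* const reason_name[] = {
  "age", "predicate", "loop_limit_check", "speculate_class_check",
  "rtm_state_change"
};

static_assert(sizeof(reason_name)/sizeof(reason_name[0]) == Reason_LIMIT,
              "trap reason names out of sync with enum");

int main() { return 0; }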
@ -531,13 +531,16 @@ jint frame::interpreter_frame_expression_stack_size() const {
   // Number of elements on the interpreter expression stack
   // Callers should span by stackElementWords
   int element_size = Interpreter::stackElementWords;
+  size_t stack_size = 0;
   if (frame::interpreter_frame_expression_stack_direction() < 0) {
-    return (interpreter_frame_expression_stack() -
+    stack_size = (interpreter_frame_expression_stack() -
                   interpreter_frame_tos_address() + 1)/element_size;
   } else {
-    return (interpreter_frame_tos_address() -
+    stack_size = (interpreter_frame_tos_address() -
                   interpreter_frame_expression_stack() + 1)/element_size;
   }
+  assert( stack_size <= (size_t)max_jint, "stack size too big");
+  return ((jint)stack_size);
 }
 
 
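
Note on the frame.cpp change above: the element count is now computed once in a wide size_t, asserted to fit, and narrowed to jint at a single return, instead of returning from two places with possible truncation. A standalone sketch of the pattern:

// Standalone sketch: compute wide, assert the range, narrow once.
#include <cassert>
#include <climits>
#include <cstddef>

static int checked_narrow(size_t n) {
  assert(n <= (size_t)INT_MAX && "stack size too big");
  return (int)n;
}

int main() {
  size_t elems = 12;          // e.g. (tos - stack_base + 1) / element_size
  int n = checked_narrow(elems);
  (void)n;
}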
@ -265,7 +265,7 @@ void print_statistics() {
     os::print_statistics();
   }
 
-  if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics) {
+  if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics || PrintPreciseRTMLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
 
@ -387,7 +387,7 @@ void print_statistics() {
   }
 
 #ifdef COMPILER2
-  if (PrintPreciseBiasedLockingStatistics) {
+  if (PrintPreciseBiasedLockingStatistics || PrintPreciseRTMLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
 #endif
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user