Merge

commit a94ab9c7a1 (hotspot)
.hgtags
agent/src/os/solaris/proc
make
src
  cpu
    ppc/vm
      cppInterpreterGenerator_ppc.hpp
      cppInterpreter_ppc.cpp
      interpreterGenerator_ppc.hpp
      interpreter_ppc.cpp
      templateInterpreterGenerator_ppc.hpp
      templateInterpreter_ppc.cpp
      vm_version_ppc.cpp
    sparc/vm
      cppInterpreter_sparc.cpp
      interpreterGenerator_sparc.hpp
      interpreter_sparc.cpp
      templateInterpreter_sparc.cpp
      vm_version_sparc.cpp
    x86/vm
      cppInterpreterGenerator_x86.hpp
      cppInterpreter_x86.cpp
      interpreterGenerator_x86.cpp
      interpreterGenerator_x86.hpp
      interpreter_x86_32.cpp
      interpreter_x86_64.cpp
      templateInterpreter_x86.cpp
      templateInterpreter_x86_32.cpp
      templateInterpreter_x86_64.cpp
      vm_version_x86.cpp
    zero/vm
  os
    aix/vm
    bsd/vm
    linux/vm
    posix/vm
    solaris/vm
    windows/vm
  os_cpu/solaris_sparc/vm
  share/vm
    asm
    c1
    ci
    classfile
      classLoader.cpp
      classLoader.hpp
      dictionary.cpp
      dictionary.hpp
      symbolTable.cpp
      systemDictionary.cpp
      verifier.cpp
      verifier.hpp
    compiler
    gc_implementation
      concurrentMarkSweep
      g1
    interpreter
      abstractInterpreter.hpp
      cppInterpreter.cpp
      interpreter.cpp
      interpreterGenerator.hpp
      templateInterpreter.cpp
      templateInterpreterGenerator.hpp
    memory
      allocation.cpp
      allocation.hpp
      allocation.inline.hpp
      cardTableRS.cpp
      collectorPolicy.cpp
      heapInspection.cpp
      memRegion.cpp
      resourceArea.hpp
    opto
    precompiled
    prims
    runtime
@@ -428,3 +428,4 @@ c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
 dd472cdacc32e3afc7c5bfa7ef16ea0e0befb7fa jdk9-b23
 dde2d03b0ea46a27650839e3a1d212c7c1f7b4c8 jdk9-b24
 6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25
+48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26
@@ -314,7 +314,7 @@ static void * pathmap_dlopen(const char * name, int mode) {
     handle = dlopen(name, mode);
   }
   if (_libsaproc_debug) {
-    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
+    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
   }
   return handle;
 }
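This printf change is a real LP64 fix, not just cosmetics: `%x` consumes a 32-bit `unsigned int`, so passing a 64-bit pointer through it truncates the printed handle (and is undefined behavior for a pointer vararg). A standalone illustration of the same pattern (illustrative code, not part of the commit):

#include <cstdio>

int main() {
  void* handle = reinterpret_cast<void*>(0x1122334455667788UL);
  // Wrong on LP64: %x pops only 32 bits, printing 0x55667788 at best.
  // printf("pathmap_dlopen return 0x%x\n", handle);
  // The commit's fix: widen explicitly and use a long conversion...
  printf("pathmap_dlopen return 0x%lx\n", (unsigned long) handle);
  // ...though %p is the fully portable spelling for pointers.
  printf("pathmap_dlopen return %p\n", handle);
  return 0;
}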
@@ -508,13 +508,9 @@ endif

 ifeq ($(USE_CLANG),)
   # Enable bounds checking.
-  # _FORTIFY_SOURCE appears in GCC 4.0+
   ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-    # compile time size bounds checks
-    FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-    # and runtime size bounds checks and paranoid stack smashing checks.
-    DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+    # stack smashing checks.
+    DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
   endif
 endif
@@ -119,8 +119,8 @@ ifeq ($(INCLUDE_NMT), false)
   CFLAGS += -DINCLUDE_NMT=0

   Src_Files_EXCLUDE += \
-    memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
-    memTracker.cpp nmtDCmd.cpp
+    memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
+    memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif

 -include $(HS_ALT_MAKE)/excludeSrc.make
@@ -365,16 +365,13 @@ endif

 ifeq ($(USE_CLANG),)
   # Enable bounds checking.
-  # _FORTIFY_SOURCE appears in GCC 4.0+
   ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-    # compile time size bounds checks
-    FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-    # and runtime size bounds checks and paranoid stack smashing checks.
-    DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+    # stack smashing checks.
+    DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
   endif
 endif
+

 # If we are building HEADLESS, pass on to VM
 # so it can set the java.awt.headless property
 ifdef HEADLESS
@@ -240,11 +240,7 @@ ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
 endif

 # Enable bounds checking.
-# _FORTIFY_SOURCE appears in GCC 4.0+
 ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-  # compile time size bounds checks
-  FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-  # and runtime size bounds checks and paranoid stack smashing checks.
-  DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+  # stack smashing checks.
+  DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
 endif
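The three makefile hunks above are the same edit repeated in each platform's gcc.make: the `_FORTIFY_SOURCE` defines are dropped and only `-fstack-protector-all` is kept for debug builds. For context, `_FORTIFY_SOURCE=2` makes glibc replace size-checkable calls such as `memcpy` with checked `__memcpy_chk` variants; a tiny illustration of the class of error it traps (illustrative code, not from the commit):

#include <cstring>

int main() {
  char dst[8];
  char src[16] = "0123456789abcde";
  // Compiled with -O2 -D_FORTIFY_SOURCE=2 on glibc, this memcpy is
  // rewritten to __memcpy_chk(dst, src, 16, sizeof(dst)), which
  // aborts at runtime instead of silently overflowing dst.
  memcpy(dst, src, sizeof(src));
  return dst[0];
}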
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,8 +26,9 @@
 #ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
 #define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP

-  address generate_normal_entry(void);
-  address generate_native_entry(void);
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

   void lock_method(void);
   void unlock_method(void);
@@ -938,8 +938,9 @@ void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
 // Interpreter stub for calling a native method. (C++ interpreter)
 // This sets up a somewhat different looking stack for calling the native method
 // than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
 //
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
   if (native_entry != NULL) return native_entry;
   address entry = __ pc();

@@ -1729,7 +1730,8 @@ void CppInterpreterGenerator::generate_more_monitors() {
   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
 }

-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;

   address entry = __ pc();
@@ -2789,38 +2791,6 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   return interpreter_frame_manager;
 }

-// Generate code for various sorts of method entries
-//
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals                 : break;
-    case Interpreter::zerolocals_synchronized    : break;
-    case Interpreter::native                     : // Fall thru
-    case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
-    case Interpreter::empty                      : break;
-    case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
-    case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-    // These are special interpreter intrinsics which we don't support so far.
-    case Interpreter::java_lang_math_sin         : break;
-    case Interpreter::java_lang_math_cos         : break;
-    case Interpreter::java_lang_math_tan         : break;
-    case Interpreter::java_lang_math_abs         : break;
-    case Interpreter::java_lang_math_log         : break;
-    case Interpreter::java_lang_math_log10       : break;
-    case Interpreter::java_lang_math_sqrt        : break;
-    case Interpreter::java_lang_math_pow         : break;
-    case Interpreter::java_lang_math_exp         : break;
-    case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                      : ShouldNotReachHere(); break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-  return ((InterpreterGenerator*)this)->generate_normal_entry();
-}

 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
   : CppInterpreterGenerator(code) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -31,7 +31,12 @@
  private:

   address generate_abstract_entry(void);
-  address generate_accessor_entry(void);
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);

+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
@@ -428,6 +428,19 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
   return entry;
 }

+
+// Call an accessor method (assuming it is resolved, otherwise drop into
+// vanilla (slow path) entry.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+  assert(normal_entry != NULL, "should already be generated.");
+  __ branch_to_entry(normal_entry, R11_scratch1);
+  __ flush();
+
+  return entry;
+}
+
 // Abstract method entry.
 //
 address InterpreterGenerator::generate_abstract_entry(void) {
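This small stub is the replacement for the hand-written fast paths deleted in the rest of the commit: accessor and empty methods no longer get bespoke assembly, they simply branch to the already-generated generic (zerolocals) entry. The assert encodes a generation-order assumption; a sketch of that ordering (hypothetical harness, helper names invented for illustration):

// Sketch: the generic entry must be generated before any trivial-kind
// entry that forwards to it, or entry_for_kind() would still be NULL.
void generate_all_entries_sketch(InterpreterGenerator* g) {
  // 1. Generic bytecode entry first; entry_for_kind(zerolocals) is
  //    recorded here (set_entry_for_kind is a hypothetical helper).
  set_entry_for_kind(Interpreter::zerolocals, g->generate_normal_entry(false));
  // 2. Trivial kinds can now tail-jump to it via the new stub.
  set_entry_for_kind(Interpreter::accessor, g->generate_accessor_entry());
  set_entry_for_kind(Interpreter::empty,    g->generate_empty_entry());
}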
@@ -485,203 +498,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
   return entry;
 }

-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_accessor_entry(void) {
-  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
-    return NULL;
-  }
-
-  Label Lslow_path, Lacquire;
-
-  const Register
-         Rclass_or_obj = R3_ARG1,
-         Rconst_method = R4_ARG2,
-         Rcodes        = Rconst_method,
-         Rcpool_cache  = R5_ARG3,
-         Rscratch      = R11_scratch1,
-         Rjvmti_mode   = Rscratch,
-         Roffset       = R12_scratch2,
-         Rflags        = R6_ARG4,
-         Rbtable       = R7_ARG5;
-
-  static address branch_table[number_of_states];
-
-  address entry = __ pc();
-
-  // Check for safepoint:
-  // Ditch this, real man don't need safepoint checks.
-
-  // Also check for JVMTI mode
-  // Check for null obj, take slow path if so.
-  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpdi(CCR1, Rclass_or_obj, 0);
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
-  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
-
-  // Do 2 things in parallel:
-  // 1. Load the index out of the first instruction word, which looks like this:
-  //    <0x2a><0xb4><index (2 byte, native endianess)>.
-  // 2. Load constant pool cache base.
-  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
-  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-
-  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
-  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
-
-  // Get the const pool entry by means of <index>.
-  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
-  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
-  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
-
-  // Check if cpool cache entry is resolved.
-  // We are resolved if the indices offset contains the current bytecode.
-  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-  // Big Endian:
-  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
-  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
-  __ bne(CCR0, Lslow_path);
-  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
-
-  // Finally, start loading the value: Get cp cache entry into regs.
-  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
-  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
-
-  // Following code is from templateTable::getfield_or_static
-  // Load pointer to branch table
-  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
-
-  // Get volatile flag
-  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
-  // note: sync is needed before volatile load on PPC64
-
-  // Check field type
-  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
-
-#ifdef ASSERT
-  Label LFlagInvalid;
-  __ cmpldi(CCR0, Rflags, number_of_states);
-  __ bge(CCR0, LFlagInvalid);
-
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x543);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // Load from branch table and dispatch (volatile case: one instruction ahead)
-  __ sldi(Rflags, Rflags, LogBytesPerWord);
-  __ cmpwi(CCR6, Rscratch, 1); // volatile?
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
-  }
-  __ ldx(Rbtable, Rbtable, Rflags);
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
-  }
-  __ mtctr(Rbtable);
-  __ bctr();
-
-#ifdef ASSERT
-  __ bind(LFlagInvalid);
-  __ stop("got invalid flag", 0x6541);
-
-  bool all_uninitialized = true,
-       all_initialized   = true;
-  for (int i = 0; i<number_of_states; ++i) {
-    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
-    all_initialized   = all_initialized   && (branch_table[i] != NULL);
-  }
-  assert(all_uninitialized != all_initialized, "consistency"); // either or
-
-  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
-  __ stop("unexpected type", 0x6551);
-#endif
-
-  if (branch_table[itos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[itos] = __ pc(); // non-volatile_entry point
-    __ lwax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ltos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ltos] = __ pc(); // non-volatile_entry point
-    __ ldx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[btos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[btos] = __ pc(); // non-volatile_entry point
-    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
-    __ extsb(R3_RET, R3_RET);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ctos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ctos] = __ pc(); // non-volatile_entry point
-    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[stos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[stos] = __ pc(); // non-volatile_entry point
-    __ lhax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[atos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[atos] = __ pc(); // non-volatile_entry point
-    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
-    __ verify_oop(R3_RET);
-    //__ dcbt(R3_RET); // prefetch
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  __ align(32, 12);
-  __ bind(Lacquire);
-  __ twi_0(R3_RET);
-  __ isync(); // acquire
-  __ blr();
-
-#ifdef ASSERT
-  for (int i = 0; i<number_of_states; ++i) {
-    assert(branch_table[i], "accessor_entry initialization");
-    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
-  }
-#endif
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
-  __ flush();
-
-  return entry;
-}

 // Interpreter intrinsic for WeakReference.get().
 // 1. Don't push a full blown frame and go on dispatching, but fetch the value
 //    into R8 and return quickly
@@ -713,7 +529,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
   // and so we don't need to call the G1 pre-barrier. Thus we can use the
   // regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_enty.

   address entry = __ pc();

@@ -768,7 +583,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

     return entry;
   } else {
-    return generate_accessor_entry();
+    return generate_jump_to_normal_entry();
   }
 }

@@ -30,7 +30,6 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);

   void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
   void unlock_method(bool check_exceptions = true);
@@ -602,48 +602,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist

 // End of helpers

-// ============================================================================
-// Various method entries
-//
-
-// Empty method, generate a very fast return. We must skip this entry if
-// someone's debugging, indicated by the flag
-// "interp_mode" in the Thread obj.
-// Note: empty methods are generated mostly methods that do assertions, which are
-// disabled in the "java opt build".
-address TemplateInterpreterGenerator::generate_empty_entry(void) {
-  if (!UseFastEmptyMethods) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return Interpreter::entry_for_kind(Interpreter::zerolocals);
-  }
-
-  Label Lslow_path;
-  const Register Rjvmti_mode = R11_scratch1;
-  address entry = __ pc();
-
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
-  // Noone's debuggin: Simply return.
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // And we're done.
-  __ blr();
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-  __ flush();
-
-  return entry;
-}

 // Support abs and sqrt like in compiler.
 // For others we can use a normal (native) entry.
@@ -1289,45 +1247,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   return entry;
 }

-// =============================================================================
-// Entry points
-
-address AbstractInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // Determine code generation flags.
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals             : break;
-    case Interpreter::zerolocals_synchronized: synchronized = true; break;
-    case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
-    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
-    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
-    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
-    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
-
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
-    case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                  : ShouldNotReachHere(); break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
-}
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
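With this hunk (and its SPARC and C++-interpreter twins elsewhere in the commit) the per-CPU copies of `generate_method_entry` disappear; the file list shows `interpreter.cpp` and `templateInterpreter.cpp` under `share/vm/interpreter` changing too, which is presumably where a single shared dispatch now lives. A hedged sketch of the consolidated shape, not the verbatim shared code:

// Sketch: one shared switch replaces the near-identical per-CPU ones.
address InterpreterGenerator::generate_method_entry(
    AbstractInterpreter::MethodKind kind) {
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
    case Interpreter::zerolocals             : break;
    case Interpreter::zerolocals_synchronized: synchronized = true; break;
    case Interpreter::native                 : entry_point = generate_native_entry(false); break;
    case Interpreter::native_synchronized    : entry_point = generate_native_entry(true);  break;
    // Trivial kinds now funnel through generate_jump_to_normal_entry()
    // via the inline forwarders added to each CPU's header.
    case Interpreter::empty                  : entry_point = generate_empty_entry();    break;
    case Interpreter::accessor               : entry_point = generate_accessor_entry(); break;
    case Interpreter::abstract               : entry_point = generate_abstract_entry(); break;
    default                                  : ShouldNotReachHere(); break;
  }

  if (entry_point != NULL) return entry_point;
  return generate_normal_entry(synchronized);
}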
@@ -1355,7 +1274,7 @@ int AbstractInterpreter::size_activation(int max_stack,
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in AbstractInterpreterGenerator::generate_method_entry.
+  // in InterpreterGenerator::generate_fixed_frame.
   assert(Interpreter::stackElementWords == 1, "sanity");
   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
@@ -29,6 +29,7 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
+#include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/defaultStream.hpp"
 #include "vm_version_ppc.hpp"
@@ -108,7 +109,7 @@ void VM_Version::initialize() {
                (has_vand()    ? " vand"    : "")
                // Make sure number of %s matches num_features!
               );
-  _features_str = strdup(buf);
+  _features_str = os::strdup(buf);
   NOT_PRODUCT(if (Verbose) print_features(););

   // PPC64 supports 8-byte compare-exchange operations (see
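This `strdup` to `os::strdup` swap (repeated below for SPARC) routes the copy through the VM's own allocator, so the memory shows up in Native Memory Tracking instead of being an untracked libc allocation. A minimal sketch of what such a wrapper has to do (illustrative; `tracked_malloc` stands in for the real `os::malloc`):

#include <cstring>
#include <cstdlib>

// Stand-in for os::malloc: the real one tags the block for NMT.
static void* tracked_malloc(size_t size) {
  return malloc(size);
}

// strdup with the same contract, but the allocation goes through the
// tracked allocator rather than straight to libc.
char* os_strdup_sketch(const char* str) {
  size_t size = strlen(str) + 1;           // include the terminating '\0'
  char* dup = (char*) tracked_malloc(size);
  if (dup != NULL) {
    memcpy(dup, str, size);
  }
  return dup;
}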
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.hpp"
@@ -68,9 +69,7 @@ bool CppInterpreter::contains(address pc) {
 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
 #define __ _masm->

-Label frame_manager_entry;
-Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
-                                      // c++ interpreter entry point this holds that entry point label.
+Label frame_manager_entry;  // c++ interpreter entry point this holds that entry point label.

 static address unctrap_frame_manager_entry = NULL;

@@ -452,110 +451,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
   return NULL;
 }

-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
-  // parameter size = 1
-  // Note: We can only use this code if the getfield has been resolved
-  //       and if we don't have a null-pointer exception => check for
-  //       these conditions first and use slow path if necessary.
-  address entry = __ pc();
-  Label slow_path;
-
-  if ( UseFastAccessorMethods) {
-    // Check if we need to reach a safepoint and generate full interpreter
-    // frame if so.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Check if local 0 != NULL
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
-    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    // get first 4 bytes of the bytecodes (big endian!)
-    __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
-    __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
-
-    // move index @ 2 far left then to the right most two bytes.
-    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
-                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
-    // get constant pool cache
-    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
-    // get specific constant pool cache entry
-    __ add(G3_scratch, G1_scratch, G3_scratch);
-
-    // Check the constant Pool cache entry to see if it has been resolved.
-    // If not, need the slow path.
-    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp(G1_scratch, Bytecodes::_getfield);
-    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
-    __ delayed()->nop();
-
-    // Get the type and return field offset from the constant pool cache
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
-    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
-
-    Label xreturn_path;
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
-    // Make sure we don't need to mask G1_scratch after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmp(G1_scratch, atos );
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, itos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, stos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, ctos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
-    __ cmp(G1_scratch, btos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
-    __ should_not_reach_here();
-#endif
-    __ ldsb(Otos_i, G3_scratch, Otos_i);
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ retl();                  // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    __ ba(fast_accessor_slow_entry_path);
-    __ delayed()->nop();
-    return entry;
-  }
-  return NULL;
-}

 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
@@ -573,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }

 //
@@ -1870,23 +1765,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   __ ba(call_interpreter_2);
   __ delayed()->st_ptr(O1, STATE(_stack));

-
-  // Fast accessor methods share this entry point.
-  // This works because frame manager is in the same codelet
-  // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
-  // we need to do a little register fixup here once we distinguish the two of them
-  if (UseFastAccessorMethods && !synchronized) {
-    // Call stub_return address still in O7
-    __ bind(fast_accessor_slow_entry_path);
-    __ set((intptr_t)return_from_native_method - 8, Gtmp1);
-    __ cmp(Gtmp1, O7);  // returning to interpreter?
-    __ brx(Assembler::equal, true, Assembler::pt, re_dispatch);  // yep
-    __ delayed()->nop();
-    __ ba(re_dispatch);
-    __ delayed()->mov(G0, prevState);  // initial entry
-
-  }
-
   // interpreter returning to native code (call_stub/c1/c2)
   // convert result and unwind initial activation
   // L2_scratch - scaled result type index
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
+  // there are no math intrinsics on sparc
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);
   void lock_method(void);
   void save_native_result(void);
@@ -43,4 +45,7 @@
   void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
   void generate_counter_overflow(Label& Lcontinue);

+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
@@ -241,6 +241,15 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

 // Various method entries

+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
+  __ jump_to(al, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
+
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
@@ -255,159 +264,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {

 }

-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-// The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// : free          :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// : arguments     :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |  VM locals    |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// : free          :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// : arguments     :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  // determine code generation flags
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals             : break;
-    case Interpreter::zerolocals_synchronized: synchronized = true; break;
-    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
-    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
-    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
-    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
-    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
-
-    case Interpreter::java_lang_math_sin     : break;
-    case Interpreter::java_lang_math_cos     : break;
-    case Interpreter::java_lang_math_tan     : break;
-    case Interpreter::java_lang_math_sqrt    : break;
-    case Interpreter::java_lang_math_abs     : break;
-    case Interpreter::java_lang_math_log     : break;
-    case Interpreter::java_lang_math_log10   : break;
-    case Interpreter::java_lang_math_pow     : break;
-    case Interpreter::java_lang_math_exp     : break;
-    case Interpreter::java_lang_ref_reference_get
-                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default:
-      fatal(err_msg("unexpected method kind: %d", kind));
-      break;
-  }
-
-  if (entry_point) return entry_point;
-
-  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-}
-

 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   // No special entry points that preclude compilation
   return true;
@@ -456,6 +456,115 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
 // Generate a fixed interpreter frame. This is identical setup for interpreted
 // methods and for native methods hence the shared code.

+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+//    G5_method: holds the Method* of the method to call
+//    Lesp:    points to the TOS of the callers expression stack
+//             after having pushed all the parameters
+//
+// The entry code does the following to setup an interpreter frame
+//   pop parameters from the callers stack by adjusting Lesp
+//   set O0 to Lesp
+//   compute X = (max_locals - num_parameters)
+//   bump SP up by X to accomadate the extra locals
+//   compute X = max_expression_stack
+//               + vm_local_words
+//               + 16 words of register save area
+//   save frame doing a save sp, -X, sp growing towards lower addresses
+//   set Lbcp, Lmethod, LcpoolCache
+//   set Llocals to i0
+//   set Lmonitors to FP - rounded_vm_local_words
+//   set Lesp to Lmonitors - 4
+//
+// The frame has now been setup to do the rest of the entry code
+
+// Try this optimization:  Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations.  It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are setup:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs  : pointer to last argument
+//
+//
+// Stack:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// : free          :
+// |               |
+// +---------------+ <--- Gargs
+// |               |
+// : arguments     :
+// |               |
+// +---------------+
+// |               |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :               :
+// |               | <--- Lesp
+// +---------------+ <--- Lmonitors (fp - 0x18)
+// |  VM locals    |
+// +---------------+ <--- fp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- fp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- fp + 0x5c
+// |               |
+// : free          :
+// |               |
+// +---------------+
+// |               |
+// : nonarg locals :
+// |               |
+// +---------------+
+// |               |
+// : arguments     :
+// |               | <--- Llocals
+// +---------------+ <--- Gargs
+// |               |
+
 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
   //
   //
@@ -599,136 +708,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

 }

-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
-  // A method that does nother but return...
-
-  address entry = __ pc();
-  Label slow_path;
-
-  // do nothing for empty methods (do not even increment invocation counter)
-  if ( UseFastEmptyMethods) {
-    // If we need a safepoint check, generate full interpreter entry.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ set(sync_state, G3_scratch);
-    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Code: _return
-    __ retl();
-    __ delayed()->mov(O5_savedSP, SP);
-
-    __ bind(slow_path);
-    (void) generate_normal_entry(false);
-
-    return entry;
-  }
-  return NULL;
-}
-
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
-  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
-  // parameter size = 1
-  // Note: We can only use this code if the getfield has been resolved
-  //       and if we don't have a null-pointer exception => check for
-  //       these conditions first and use slow path if necessary.
-  address entry = __ pc();
-  Label slow_path;
-
-
-  // XXX: for compressed oops pointer loading and decoding doesn't fit in
-  // delay slot and damages G1
-  if ( UseFastAccessorMethods && !UseCompressedOops ) {
-    // Check if we need to reach a safepoint and generate full interpreter
-    // frame if so.
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Check if local 0 != NULL
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    // check if local 0 == NULL and go the slow path
-    __ br_null_short(Otos_i, Assembler::pn, slow_path);
-
-
-    // read first instruction word and extract bytecode @ 1 and index @ 2
-    // get first 4 bytes of the bytecodes (big endian!)
-    __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
-    __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
-
-    // move index @ 2 far left then to the right most two bytes.
-    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
-                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
-    // get constant pool cache
-    __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
-    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
-    // get specific constant pool cache entry
-    __ add(G3_scratch, G1_scratch, G3_scratch);
-
-    // Check the constant Pool cache entry to see if it has been resolved.
-    // If not, need the slow path.
-    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
-    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
-    __ and3(G1_scratch, 0xFF, G1_scratch);
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
-
-    // Get the type and return field offset from the constant pool cache
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
-    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
-
-    Label xreturn_path;
-    // Need to differentiate between igetfield, agetfield, bgetfield etc.
-    // because they are different sizes.
-    // Get the type from the constant pool cache
-    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
-    // Make sure we don't need to mask G1_scratch after the above shift
-    ConstantPoolCacheEntry::verify_tos_state_shift();
-    __ cmp(G1_scratch, atos );
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, itos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, stos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
-    __ cmp(G1_scratch, ctos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
-    __ cmp(G1_scratch, btos);
-    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
-    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
-    __ should_not_reach_here();
-#endif
-    __ ldsb(Otos_i, G3_scratch, Otos_i);
-    __ bind(xreturn_path);
-
-    // _ireturn/_areturn
-    __ retl();                  // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    (void) generate_normal_entry(false);
-    return entry;
-  }
-  return NULL;
-}

 // Method entry for java.lang.ref.Reference.get.
 address InterpreterGenerator::generate_Reference_get_entry(void) {
 #if INCLUDE_ALL_GCS
@@ -806,7 +785,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_accessor_entry();
+  return generate_jump_to_normal_entry();
 }

 //
@@ -1242,8 +1221,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {


 // Generic method entry to (asm) interpreter
-//------------------------------------------------------------------------------------------------------------------------
-//
 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   address entry = __ pc();

@@ -1410,123 +1387,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
   return entry;
 }

-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accomadate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-// The frame has now been setup to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// : free          :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// : arguments     :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |  VM locals    |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// : free          :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// : arguments     :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |

 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

   // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
@ -26,6 +26,7 @@
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "vm_version_sparc.hpp"
|
||||
|
||||
@ -249,7 +250,7 @@ void VM_Version::initialize() {
|
||||
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
|
||||
|
||||
// buf is started with ", " or is empty
|
||||
_features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
|
||||
_features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);
|
||||
|
||||
// There are three 64-bit SPARC families that do not overlap, e.g.,
|
||||
// both is_ultra3() and is_sparc64() cannot be true at the same time.
|
||||
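
// The strdup -> os::strdup change above routes the copy through the VM's own
// allocator so Native Memory Tracking can account for it. A hedged sketch of
// the idea (not the real body of os::strdup in runtime/os.cpp):
static char* strdup_tracked_sketch(const char* s) {
  size_t len = strlen(s) + 1;
  char* copy = (char*) os::malloc(len, mtInternal); // NMT-visible allocation
  if (copy != NULL) {
    memcpy(copy, s, len);
  }
  return copy;
}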
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,21 +27,6 @@

protected:

#if 0
address generate_asm_interpreter_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);

void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label* do_continue);
#endif

void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,9 +66,6 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))

Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to the unsynchronized
// c++ interpreter entry point; this label holds that entry point.

// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@ -660,7 +657,6 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// generate_method_entry) so the guard should work for them too.
//

// monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@ -794,156 +790,6 @@ void InterpreterGenerator::lock_method(void) {
__ lock_object(monitor);
}

// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

// rbx: Method*

// rsi/r13: senderSP must be preserved for slow path, set SP to it on fast path

Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {

address entry_point = __ pc();

Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);

__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));

// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);

// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
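// Worked example of the extraction above (hedged): for a resolved
// _aload_0/_getfield pair with constant pool cache index 0x0007, the fetched
// word is 0x0007b42a; rdx >> 16 yields 0x0007, which shll then scales by the
// ConstantPoolCacheEntry size before indexing the cache just loaded into rdi.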

// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi/r13: sender sp

// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);

// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);

// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ movptr(rax, field_address);
__ jmp(xreturn_path);

__ bind(notObj);
#endif // _LP64
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);

__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notChar);
#ifdef ASSERT
Label okay;
#ifndef _LP64
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
#endif // _LP64
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
__ movl(rax, field_address);

__ bind(xreturn_path);

// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, sender_sp_on_entry); // set sp to sender sp
__ jmp(rdi);

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
// We will enter the c++ interpreter looking like it was
// called by the call_stub; this will cause it to return
// a tosca result to the invoker, which might have been
// the c++ interpreter itself.

__ jmp(fast_accessor_slow_entry_path);
return entry_point;

} else {
return NULL;
}

}

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@ -961,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

//
@ -1670,10 +1516,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

address entry_point = __ pc();

// Fast accessor methods share this entry point.
// This works because the frame manager is in the same codelet.
if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);

Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@ -2212,40 +2054,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

if (entry_point) return entry_point;

return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);

}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
66
hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp
Normal file
@ -0,0 +1,66 @@

/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"

#define __ _masm->

// Jump to the normal entry for accessor and empty entries.
// The "fast" optimizations don't update the invocation count and can therefore
// disable inlining of these methods, which should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();

assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
return entry_point;
}
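
// With this trampoline in place the trivial entries can simply delegate, as
// the interpreterGenerator_x86.hpp hunk further below does:
//
//   address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
//   address generate_empty_entry(void)    { return generate_jump_to_normal_entry(); }
//
// so every invocation runs through the zerolocals entry and its counter updates.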

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);

@ -67,45 +67,6 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}

//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must be preserved
// rsi: sender sp must set sp to this value on return

if (!UseFastEmptyMethods) return NULL;

address entry_point = __ pc();

// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);

// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, rsi);
__ jmp(rax);

__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}

address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

// rbx,: Method*
@ -216,36 +177,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must be preserved

// rsi: sender SP

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in

@ -301,66 +301,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx: Method*
// r13: sender SP

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {
// rbx: Method*
// r13: sender sp must set sp to this value on return

if (!UseFastEmptyMethods) {
return NULL;
}

address entry_point = __ pc();

// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);

// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);

__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;

}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in

@ -38,7 +38,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// in InterpreterGenerator::generate_fixed_frame.

// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -

@ -468,10 +468,10 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rax,

// NOTE: since the additional locals are also always pushed (wasn't obvious in
// generate_method_entry) so the guard should work for them too.
// generate_fixed_frame) so the guard should work for them too.
//

// monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@ -633,145 +633,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

// rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)

// rsi: senderSP must be preserved for slow path, set SP to it on fast path

address entry_point = __ pc();
Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);

__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));

// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);

// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi: sender sp

// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);

// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);

// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);

__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notChar);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
// This is ok for now, since fast accessors should be going away
__ movptr(rax, field_address);

__ bind(xreturn_path);

// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);

(void) generate_normal_entry(false);
return entry_point;
}
return NULL;

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@ -862,7 +723,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

/**
@ -1557,100 +1418,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved rsi ]
// [ current rdi ]
// [ Method* ]
// [ saved rbp, ] <--- rbp,
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- rdi

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) return entry_point;

return ig_this->generate_normal_entry(synchronized);

}
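
// Dispatch shape of the removed generate_method_entry above (hedged trace):
// special kinds return a dedicated stub, while zerolocals and
// zerolocals_synchronized leave entry_point NULL and fall through, e.g.
//
//   kind == Interpreter::java_lang_math_sqrt     -> ig_this->generate_math_entry(kind)
//   kind == Interpreter::zerolocals_synchronized -> ig_this->generate_normal_entry(true)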

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.

@ -400,7 +400,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// obvious in generate_fixed_frame) so the guard should work for them
// too.
//
// Args:
@ -411,8 +411,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp through expr stack
@ -600,153 +599,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop
// into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*

// r13: senderSP must be preserved for slow path, set SP to it on fast path

address entry_point = __ pc();
Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
// thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);

__ jcc(Assembler::notEqual, slow_path);
// rbx: method
__ movptr(rax, Address(rsp, wordSize));

// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);

// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

// rax: local 0
// rbx: method
// rdx: constant pool cache index
// rdi: constant pool cache

// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
"adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);

// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset()));

Label notObj, notInt, notByte, notShort;
const Address field_address(rax, rcx, Address::times_1);

// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();

__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);

__ bind(notObj);
__ cmpl(rdx, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ movl(rax, field_address);
__ jmp(xreturn_path);

__ bind(notInt);
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
// btos
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);

__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notShort);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, ctos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif
// ctos
__ load_unsigned_short(rax, field_address);

__ bind(xreturn_path);

// _ireturn/_areturn
__ pop(rdi);
__ mov(rsp, r13);
__ jmp(rdi);
__ ret(0);

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
} else {
(void) generate_normal_entry(false);
}

return entry_point;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@ -773,8 +625,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: Method*

// r13: senderSP must be preserved for slow path, set SP to it on fast path
@ -832,7 +682,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

/**
@ -1566,100 +1416,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special-purpose entries
// that combine entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved r13 ]
// [ current r14 ]
// [ Method* ]
// [ saved ebp ] <--- rbp
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) {
return entry_point;
}

return ig_this->generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
@ -27,6 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"

@ -514,7 +515,7 @@ void VM_Version::get_processor_features() {
(supports_tscinv() ? ", tscinv": ""),
(supports_bmi1() ? ", bmi1" : ""),
(supports_bmi2() ? ", bmi2" : ""));
_features_str = strdup(buf);
_features_str = os::strdup(buf);

// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -831,60 +831,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}

address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals:
case Interpreter::zerolocals_synchronized:
break;

case Interpreter::native:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;

case Interpreter::native_synchronized:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;

case Interpreter::empty:
entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
break;

case Interpreter::accessor:
entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
break;

case Interpreter::abstract:
entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
break;

case Interpreter::java_lang_math_sin:
case Interpreter::java_lang_math_cos:
case Interpreter::java_lang_math_tan:
case Interpreter::java_lang_math_abs:
case Interpreter::java_lang_math_log:
case Interpreter::java_lang_math_log10:
case Interpreter::java_lang_math_sqrt:
case Interpreter::java_lang_math_pow:
case Interpreter::java_lang_math_exp:
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;

case Interpreter::java_lang_ref_reference_get:
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
break;

default:
ShouldNotReachHere();
}

if (entry_point == NULL)
entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);

return entry_point;
}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -61,6 +61,12 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS

define_pd_global(uintx, TypeProfileLevel, 0);

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
product(bool, UseFastEmptyMethods, true, \
"Use fast method entry code for empty methods") \
\
product(bool, UseFastAccessorMethods, true, \
"Use fast method entry code for accessor methods") \
\

#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,4 +39,7 @@
address generate_accessor_entry();
address generate_Reference_get_entry();

// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

@ -58,6 +58,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
@ -378,10 +379,10 @@ void os::Aix::query_multipage_support() {
// default should be 4K.
size_t data_page_size = SIZE_4K;
{
void* p = ::malloc(SIZE_16M);
void* p = os::malloc(SIZE_16M, mtInternal);
guarantee(p != NULL, "malloc failed");
data_page_size = os::Aix::query_pagesize(p);
::free(p);
os::free(p);
}
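
// Pairing rule the hunk above enforces: memory obtained from os::malloc must
// be returned through os::free, otherwise NMT's malloc accounting keeps a
// dangling record. Hedged shape of the discipline:
//
//   void* p = os::malloc(SIZE_16M, mtInternal); // recorded against mtInternal
//   ... use p ...
//   os::free(p);                                // record released again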

// query default shm page size (LDR_CNTRL SHMPSIZE)

@ -24,6 +24,8 @@

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "loadlib_aix.hpp"
#include "porting_aix.hpp"
#include "utilities/debug.hpp"
@ -83,7 +85,7 @@ class fixed_strings {
while (n) {
node* p = n;
n = n->next;
free(p->v);
os::free(p->v);
delete p;
}
}
@ -95,7 +97,7 @@ class fixed_strings {
}
}
node* p = new node;
p->v = strdup(s);
p->v = os::strdup_check_oom(s);
p->next = first;
first = p;
return p->v;

@ -2439,23 +2439,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}

// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

return addr;
}

bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
} else {
return false;
}
} else {
tkr.discard();
return false;
return shmdt(base) == 0;
}

}
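
// The rewritten release path above only constructs a Tracker when NMT is
// actually recording; otherwise the bare detach runs. A hedged sketch of the
// shape, with do_release() standing in for shmdt()/munmap():
//
//   if (MemTracker::tracking_level() > NMT_minimal) {
//     Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
//     if (do_release(base, bytes)) { tkr.record((address)base, bytes); return true; }
//     return false;
//   }
//   return do_release(base, bytes);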
|
||||
|
||||
size_t os::large_page_size() {
|
||||
|
@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
|
||||
(void)::memset((void*) mapAddress, 0, size);
|
||||
|
||||
// it does not go through os api, the operation has to record from here
|
||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||
|
||||
return mapAddress;
|
||||
}
|
||||
@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
||||
}
|
||||
|
||||
// it does not go through os api, the operation has to record from here
|
||||
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
|
||||
|
||||
*addr = mapAddress;
|
||||
*sizep = size;
|
||||
|
@ -3504,9 +3504,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
|
||||
|
||||
assert(is_ptr_aligned(start, alignment), "Must be");
|
||||
|
||||
// os::reserve_memory_special will record this memory area.
|
||||
// Need to release it here to prevent overlapping reservations.
|
||||
MemTracker::record_virtual_memory_release((address)start, bytes);
|
||||
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||
// os::reserve_memory_special will record this memory area.
|
||||
// Need to release it here to prevent overlapping reservations.
|
||||
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||
tkr.record((address)start, bytes);
|
||||
}
|
||||
|
||||
char* end = start + bytes;
|
||||
|
||||
@ -3601,7 +3604,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
|
||||
}
|
||||
|
||||
// The memory is committed
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
|
||||
}
|
||||
|
||||
return addr;
|
||||
@ -3617,24 +3620,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
|
||||
}
|
||||
|
||||
bool os::release_memory_special(char* base, size_t bytes) {
|
||||
assert(UseLargePages, "only for large pages");
|
||||
|
||||
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||
|
||||
bool res;
|
||||
if (MemTracker::tracking_level() > NMT_minimal) {
|
||||
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
|
||||
res = os::Linux::release_memory_special_impl(base, bytes);
|
||||
if (res) {
|
||||
tkr.record((address)base, bytes);
|
||||
}
|
||||
|
||||
} else {
|
||||
res = os::Linux::release_memory_special_impl(base, bytes);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
|
||||
assert(UseLargePages, "only for large pages");
|
||||
bool res;
|
||||
|
||||
if (UseSHM) {
|
||||
res = os::Linux::release_memory_special_shm(base, bytes);
|
||||
} else {
|
||||
assert(UseHugeTLBFS, "must be");
|
||||
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
|
||||
}
|
||||
|
||||
if (res) {
|
||||
tkr.record((address)base, bytes);
|
||||
} else {
|
||||
tkr.discard();
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -108,6 +108,7 @@ class Linux {
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);

static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

return mapAddress;
}
@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

*addr = mapAddress;
*sizep = size;
@ -75,21 +75,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}

address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip++;
#endif

int frame_idx = 0;
int num_of_frames; // number of frames captured
frame fr = os::current_frame();
while (n > 0 && fr.pc() &&
!os::is_first_C_frame(&fr) && fr.sender_pc()) {
fr = os::get_sender_for_C_frame(&fr);
n --;
while (fr.pc() && frame_idx < frames) {
if (toSkip > 0) {
toSkip --;
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
|| fr.sender_pc() == NULL || fr.cb() != NULL) break;

if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);
} else {
break;
}
}
if (n == 0) {
return fr.pc();
} else {
return NULL;
num_of_frames = frame_idx;
for (; frame_idx < frames; frame_idx ++) {
stack[frame_idx] = NULL;
}

return num_of_frames;
}

bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (::unsetenv(name) == 0);
}

int os::get_last_error() {
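The new interface returns the number of frames captured and NULL-fills the rest of the array, so a caller such as NMT's NativeCallStack can hash or compare the fixed-size buffer as a whole. A standalone analogue of that contract using glibc's backtrace() (not the HotSpot frame walker):

#include <execinfo.h>
#include <cstdio>

const int NMT_DEPTH = 4;  // assumption; the real tracker uses a small fixed depth

static int get_native_stack(void** stack, int frames, int toSkip) {
  void* raw[32];
  int captured = backtrace(raw, 32);  // raw[0] is this function's own frame
  int out = 0;
  for (int i = toSkip; i < captured && out < frames; i++) {
    stack[out++] = raw[i];
  }
  for (int i = out; i < frames; i++) {
    stack[i] = nullptr;  // NULL-pad the tail, mirroring the patched code
  }
  return out;            // number of frames actually captured
}

int main() {
  void* stack[NMT_DEPTH];
  int n = get_native_stack(stack, NMT_DEPTH, 1 /* skip our own frame */);
  for (int i = 0; i < n; i++) std::printf("frame[%d] = %p\n", i, stack[i]);
}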
@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);

return mapAddress;
}
@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);

*addr = mapAddress;
*sizep = size;
@ -138,9 +138,8 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
// Workaround for issue when a custom launcher doesn't call
// DestroyJavaVM and NMT is trying to track memory when free is
// called from a static destructor
if (MemTracker::is_on()) {
MemTracker::shutdown(MemTracker::NMT_normal);
}
MemTracker::shutdown();

break;
default:
break;
@ -163,6 +162,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
return result > 0 && result < len;
}

bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
@ -319,15 +322,16 @@ extern "C" void breakpoint() {
* So far, this method is only used by Native Memory Tracking, which is
* only supported on Windows XP or later.
*/
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n++;
toSkip ++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
return pc;
int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
(PVOID*)stack, NULL);
for (int index = captured; index < frames; index ++) {
stack[index] = NULL;
}
return NULL;
return captured;
}
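On Windows the same contract is met with one RtlCaptureStackBackTrace call; the +1 on toSkip drops get_native_stack's own frame. A standalone sketch against the documented Win32 wrapper CaptureStackBackTrace (same signature, without HotSpot's Kernel32Dll indirection):

#include <windows.h>
#include <cstdio>

static int get_native_stack(void** stack, int frames, int toSkip) {
  // +1 skips this function's own frame, matching the diff above.
  USHORT captured = CaptureStackBackTrace(toSkip + 1, frames, stack, NULL);
  for (int i = captured; i < frames; i++) {
    stack[i] = NULL;  // NULL-pad so the array has a fixed shape
  }
  return captured;
}

int main() {
  void* stack[4];
  int n = get_native_stack(stack, 4, 0);
  for (int i = 0; i < n; i++) std::printf("frame[%d] = %p\n", i, stack[i]);
}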
@ -2901,7 +2905,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);

// we still need to round up to a page boundary (in case we are using large pages)
@ -2967,7 +2971,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, mtNone, CALLER_PC);
bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@ -2986,11 +2990,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}

// made it this far, success
@ -3188,8 +3191,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
}

return res;
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_windows.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -1388,7 +1389,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
// the file has been successfully created and the file mapping
// object has been created.
sharedmem_fileHandle = fh;
sharedmem_fileName = strdup(filename);
sharedmem_fileName = os::strdup(filename);

return fmh;
}
@ -1498,7 +1499,8 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);

return (char*) mapAddress;
}
@ -1680,7 +1682,8 @@ static void open_file_mapping(const char* user, int vmid,
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
CURRENT_PC, mtInternal);

*addrp = (char*)mapAddress;
@ -1834,10 +1837,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
return;
}

MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
tkr.record((address)addr, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// it does not go through os api, the operation has to record from here
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
tkr.record((address)addr, bytes);
} else {
remove_file_mapping(addr);
}
}

char* PerfMemory::backing_store_filename() {
@ -23,6 +23,8 @@
*/

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "vm_version_sparc.hpp"

@ -48,7 +50,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
// All SI defines used below must be supported.
guarantee(bufsize != -1, "must be supported");

char* buf = (char*) malloc(bufsize);
char* buf = (char*) os::malloc(bufsize, mtInternal);

if (buf == NULL)
return;
@ -60,7 +62,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
}
}

free(buf);
os::free(buf);
}

int VM_Version::platform_features(int features) {
@ -171,7 +173,7 @@ int VM_Version::platform_features(int features) {
}
#endif
// Convert to UPPER case before compare.
char* impl = strdup(implementation);
char* impl = os::strdup_check_oom(implementation);

for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
@ -198,7 +200,7 @@ int VM_Version::platform_features(int features) {
implementation = "SPARC";
}
}
free((void*)impl);
os::free((void*)impl);
break;
}
} // for(
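The substitution here is mechanical but the invariant matters: memory obtained through os::malloc or os::strdup must be returned through os::free, never libc free(), or the malloc accounting drifts. A standalone sketch of the wrapper idea (mocked counters and a hypothetical size header, not HotSpot's MallocTracker):

#include <cstdlib>
#include <cstring>
#include <cstdio>

static long g_tracked_bytes = 0;  // stand-in for a per-MEMFLAGS counter

// Prepend a small header so the matching free() knows how much to untrack.
void* os_malloc(std::size_t size) {
  std::size_t* p = (std::size_t*)std::malloc(sizeof(std::size_t) + size);
  if (p == nullptr) return nullptr;
  *p = size;
  g_tracked_bytes += (long)size;
  return p + 1;
}

void os_free(void* mem) {
  if (mem == nullptr) return;
  std::size_t* p = (std::size_t*)mem - 1;
  g_tracked_bytes -= (long)*p;   // calling libc free(mem) here instead
  std::free(p);                  // would corrupt the heap: mem != p
}

char* os_strdup(const char* s) {
  char* d = (char*)os_malloc(std::strlen(s) + 1);
  if (d != nullptr) std::strcpy(d, s);
  return d;
}

int main() {
  char* s = os_strdup("UltraSPARC");
  std::printf("tracked=%ld\n", g_tracked_bytes);
  os_free(s);
  std::printf("tracked=%ld\n", g_tracked_bytes);
}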
@ -269,7 +269,7 @@ address CodeBuffer::decode_begin() {

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
_overflow_arena = new (mtCode) Arena();
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}
@ -48,7 +48,7 @@ Compiler::Compiler() : AbstractCompiler(c1) {

void Compiler::init_c1_runtime() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures
@ -30,6 +30,7 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
// we must have enough patching space so that call can be inserted
@ -848,7 +849,7 @@ void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
stringStream st;
st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
_masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
_masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
_masm->verify_oop(r->as_Register());
#endif
@ -86,7 +86,8 @@ static bool firstEnv = true;

// ------------------------------------------------------------------
// ciEnv::ciEnv
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;

// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@ -144,7 +145,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_jvmti_can_pop_frame = false;
}

ciEnv::ciEnv(Arena* arena) {
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;

// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
// This Arena is long lived and exists in the resource mark of the
// compiler thread that initializes the initial ciObjectFactory which
// creates the shared ciObjects that all later ciObjectFactories use.
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
ciEnv initial(arena);
ciEnv* env = ciEnv::current();
env->_factory->init_shared_objects();
@ -273,13 +273,17 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}

LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
_path = strdup(path);
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
}

LazyClassPathEntry::~LazyClassPathEntry() {
os::free(_path);
}

bool LazyClassPathEntry::is_jar_file() {
return ((_st.st_mode & S_IFREG) == S_IFREG);
}
@ -416,7 +420,7 @@ void ClassLoader::setup_meta_index() {
default:
{
if (!skipCurrentJar && cur_entry != NULL) {
char* new_name = strdup(package_name);
char* new_name = os::strdup_check_oom(package_name);
boot_class_path_packages.append(new_name);
}
}
@ -438,7 +442,7 @@ void ClassLoader::setup_meta_index() {

void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
if (TraceClassLoading && Verbose) {
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
}
@ -460,6 +464,7 @@ void ClassLoader::setup_bootstrap_search_path() {
end++;
}
}
os::free(sys_class_path);
}

ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
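os::strdup_check_oom differs from os::strdup in exactly one way: it never returns NULL, exiting the VM on allocation failure instead, which is why the call sites above can drop their null checks. A minimal sketch of that contract (standalone names, not the HotSpot declarations):

#include <cstdlib>
#include <cstring>
#include <cstdio>

char* my_strdup(const char* s) {
  char* d = (char*)std::malloc(std::strlen(s) + 1);  // may return NULL
  if (d != nullptr) std::strcpy(d, s);
  return d;
}

char* my_strdup_check_oom(const char* s) {
  char* d = my_strdup(s);
  if (d == nullptr) {
    // The real VM calls vm_exit_out_of_memory(); callers never see NULL.
    std::fprintf(stderr, "native out of memory duplicating \"%s\"\n", s);
    std::exit(1);
  }
  return d;
}

int main() {
  char* p = my_strdup_check_oom("java/lang/Object");  // no NULL check needed
  std::puts(p);
  std::free(p);
}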
@ -128,6 +128,8 @@ class LazyClassPathEntry: public ClassPathEntry {
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(char* path, const struct stat* st);
virtual ~LazyClassPathEntry();

ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();
@ -130,15 +130,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_dom
}


bool Dictionary::do_unloading() {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move

// Remove unloadable entries and classes from system dictionary
// The placeholder array has been handled in always_strong_oops_do.
DictionaryEntry* probe = NULL;
for (index = 0; index < table_size(); index++) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
Klass* e = probe->klass();
@ -158,16 +156,8 @@ bool Dictionary::do_unloading() {
// Do we need to delete this system dictionary entry?
if (loader_data->is_unloading()) {
// If the loader is not live this entry should always be
// removed (will never be looked up again). Note that this is
// not the same as unloading the referred class.
if (k_def_class_loader_data == loader_data) {
// This is the defining entry, so the referred class is about
// to be unloaded.
class_was_unloaded = true;
}
// Also remove this system dictionary entry.
// removed (will never be looked up again).
purge_entry = true;

} else {
// The loader in this entry is alive. If the klass is dead,
// (determined by checking the defining class loader)
@ -196,7 +186,6 @@ bool Dictionary::do_unloading() {
p = probe->next_addr();
}
}
return class_was_unloaded;
}

void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
@ -108,9 +108,8 @@ public:
return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
}

// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
bool do_unloading();
// Unload (that is, break root links to) all unmarked classes and loaders.
void do_unloading();

// Protection domains
Klass* find(int index, unsigned int hash, Symbol* name,
@ -70,9 +70,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
void SymbolTable::initialize_symbols(int arena_alloc_size) {
// Initialize the arena for global symbols, size passed in depends on CDS.
if (arena_alloc_size == 0) {
_arena = new (mtSymbol) Arena();
_arena = new (mtSymbol) Arena(mtSymbol);
} else {
_arena = new (mtSymbol) Arena(arena_alloc_size);
_arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
}
}

@ -1661,10 +1661,9 @@ public:
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
bool unloading_occurred = false;
if (has_dead_loaders) {
unloading_occurred = dictionary()->do_unloading();
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
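The net effect of the dictionary.cpp and systemDictionary.cpp hunks: "did anything unload" is now answered solely by ClassLoaderDataGraph::do_unloading(), and Dictionary::do_unloading() becomes a pure purge with no return value. A condensed sketch of the new control flow (mocked types, not the HotSpot classes):

#include <cstdio>

struct ClassLoaderDataGraphMock {
  // Walks loaders; returns true iff at least one dead loader was found.
  static bool do_unloading() { return true; }  // assumption for the demo
};

struct DictionaryMock {
  // Now returns void: it only purges entries owned by dead loaders.
  void do_unloading() { std::puts("purged dictionary entries"); }
};

static DictionaryMock dictionary;

bool system_dictionary_do_unloading() {
  bool unloading_occurred = ClassLoaderDataGraphMock::do_unloading();
  if (unloading_occurred) {
    dictionary.do_unloading();  // purge is only needed when a loader died
  }
  return unloading_occurred;    // callers use this to trigger dependent cleanup
}

int main() { return system_dictionary_do_unloading() ? 0 : 1; }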
@ -2217,6 +2217,181 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
}
}

// Look at the method's handlers. If the bci is in the handler's try block
// then check if the handler_pc is already on the stack. If not, push it.
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_stack,
u4 bci) {
int exlength = exhandlers->length();
for(int x = 0; x < exlength; x++) {
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
handler_stack->append_if_missing(exhandlers->handler_pc(x));
}
}
}

// Return TRUE if all code paths starting with start_bc_offset end in
// bytecode athrow or loop.
bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
ResourceMark rm;
// Create bytecode stream.
RawBytecodeStream bcs(method());
u4 code_length = method()->code_size();
bcs.set_start(start_bc_offset);
u4 target;
// Create stack for storing bytecode start offsets for if* and *switch.
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
// Create stack for handlers for try blocks containing this handler.
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
// Create list of visited branch opcodes (goto* and if*).
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
ExceptionTable exhandlers(_method());

while (true) {
if (bcs.is_last_bytecode()) {
// if no more starting offsets to parse or if at the end of the
// method then return false.
if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
return false;
// Pop a bytecode starting offset and scan from there.
bcs.set_start(bci_stack->pop());
}
Bytecodes::Code opcode = bcs.raw_next();
u4 bci = bcs.bci();

// If the bytecode is in a TRY block, push its handlers so they
// will get parsed.
push_handlers(&exhandlers, handler_stack, bci);

switch (opcode) {
case Bytecodes::_if_icmpeq:
case Bytecodes::_if_icmpne:
case Bytecodes::_if_icmplt:
case Bytecodes::_if_icmpge:
case Bytecodes::_if_icmpgt:
case Bytecodes::_if_icmple:
case Bytecodes::_ifeq:
case Bytecodes::_ifne:
case Bytecodes::_iflt:
case Bytecodes::_ifge:
case Bytecodes::_ifgt:
case Bytecodes::_ifle:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_ifnull:
case Bytecodes::_ifnonnull:
target = bcs.dest();
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) return true;
// Pop a bytecode starting offset and scan from there.
bcs.set_start(bci_stack->pop());
} else {
if (target > bci) { // forward branch
if (target >= code_length) return false;
// Push the branch target onto the stack.
bci_stack->push(target);
// then, scan bytecodes starting with next.
bcs.set_start(bcs.next_bci());
} else { // backward branch
// Push bytecode offset following backward branch onto the stack.
bci_stack->push(bcs.next_bci());
// Check bytecodes starting with branch target.
bcs.set_start(target);
}
// Record target so we don't branch here again.
visited_branches->append(bci);
}
break;

case Bytecodes::_goto:
case Bytecodes::_goto_w:
target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) return true;
// Been here before, pop new starting offset from stack.
bcs.set_start(bci_stack->pop());
} else {
if (target >= code_length) return false;
// Continue scanning from the target onward.
bcs.set_start(target);
// Record target so we don't branch here again.
visited_branches->append(bci);
}
break;

// Check that all switch alternatives end in 'athrow' bytecodes. Since it
// is difficult to determine where each switch alternative ends, parse
// each switch alternative until either hit a 'return', 'athrow', or reach
// the end of the method's bytecodes. This is gross but should be okay
// because:
// 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
// constructor invocations should be rare.
// 2. if each switch alternative ends in an athrow then the parsing should be
// short. If there is no athrow then it is bogus code, anyway.
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
{
address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
int keys, delta;
if (opcode == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
// This is invalid, but let the regular bytecode verifier
// report this because the user will get a better error message.
if (low > high) return true;
keys = high - low + 1;
delta = 1;
} else {
keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
delta = 2;
}
// Invalid, let the regular bytecode verifier deal with it.
if (keys < 0) return true;

// Push the offset of the next bytecode onto the stack.
bci_stack->push(bcs.next_bci());

// Push the switch alternatives onto the stack.
for (int i = 0; i < keys; i++) {
u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
if (target > code_length) return false;
bci_stack->push(target);
}

// Start bytecode parsing for the switch at the default alternative.
if (default_offset > code_length) return false;
bcs.set_start(default_offset);
break;
}

case Bytecodes::_return:
return false;

case Bytecodes::_athrow:
{
if (bci_stack->is_empty()) {
if (handler_stack->is_empty()) {
return true;
} else {
// Parse the catch handlers for try blocks containing athrow.
bcs.set_start(handler_stack->pop());
}
} else {
// Pop a bytecode offset and start scanning from there.
bcs.set_start(bci_stack->pop());
}
}
break;

default:
;
} // end switch
} // end while loop

return false;
}

void ClassVerifier::verify_invoke_init(
RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@ -2236,18 +2411,26 @@ void ClassVerifier::verify_invoke_init(
return;
}

// Make sure that this call is not done from within a TRY block because
// that can result in returning an incomplete object. Simply checking
// (bci >= start_pc) also ensures that this call is not done after a TRY
// block. That is also illegal because this call must be the first Java
// statement in the constructor.
// Check if this call is done from inside of a TRY block. If so, make
// sure that all catch clause paths end in a throw. Otherwise, this
// can result in returning an incomplete object.
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
for(int i = 0; i < exlength; i++) {
if (bci >= exhandlers.start_pc(i)) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
u2 start_pc = exhandlers.start_pc(i);
u2 end_pc = exhandlers.end_pc(i);

if (bci >= start_pc && bci < end_pc) {
if (!ends_in_athrow(exhandlers.handler_pc(i))) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
} else if (VerboseVerification) {
ResourceMark rm;
tty->print_cr(
"Survived call to ends_in_athrow(): %s",
current_class()->name()->as_C_string());
}
}
}

@ -30,6 +30,7 @@
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/exceptions.hpp"

// The verifier class
@ -303,6 +304,16 @@ class ClassVerifier : public StackObj {
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
constantPoolHandle cp, TRAPS);

// Used by ends_in_athrow() to push all handlers that contain bci onto
// the handler_stack, if the handler is not already on the stack.
void push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_stack,
u4 bci);

// Returns true if all paths starting with start_bc_offset end in athrow
// bytecode or loop.
bool ends_in_athrow(u4 start_bc_offset);

void verify_invoke_instructions(
RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
bool* this_uninit, VerificationType return_type,
@ -33,6 +33,7 @@
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"

class MethodMatcher : public CHeapObj<mtCompiler> {
public:
@ -175,7 +176,11 @@ class MethodOptionMatcher: public MethodMatcher {
Symbol* method_name, Mode method_mode,
Symbol* signature, const char * opt, MethodMatcher* next):
MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
option = opt;
option = os::strdup_check_oom(opt);
}

virtual ~MethodOptionMatcher() {
os::free((void*)option);
}

bool match(methodHandle method, const char* opt) {
@ -498,7 +503,7 @@ void CompilerOracle::parse_from_line(char* line) {
tty->print("CompilerOracle: %s ", command_names[command]);
match->print();
}
match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option));
match = add_option_string(c_name, c_match, m_name, m_match, signature, option);
line += bytes_read;
}
} else {
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
}

void ConcurrentMarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");

@ -2582,15 +2582,12 @@ bool G1CollectedHeap::is_in(const void* p) const {

// Iteration functions.

// Iterates an OopClosure over all ref-containing fields of objects
// within a HeapRegion.
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.

class IterateOopClosureRegionClosure: public HeapRegionClosure {
MemRegion _mr;
ExtendedOopClosure* _cl;
public:
IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
: _mr(mr), _cl(cl) {}
IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
r->oop_iterate(_cl);
@ -2600,12 +2597,7 @@ public:
};

void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
IterateOopClosureRegionClosure blk(_g1_committed, cl);
heap_region_iterate(&blk);
}

void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
IterateOopClosureRegionClosure blk(mr, cl);
IterateOopClosureRegionClosure blk(cl);
heap_region_iterate(&blk);
}

@ -4792,11 +4784,6 @@ protected:
Mutex _stats_lock;
Mutex* stats_lock() { return &_stats_lock; }

size_t getNCards() {
return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
/ G1BlockOffsetSharedArray::N_bytes;
}

public:
G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
: AbstractGangTask("G1 collection"),
@ -1395,9 +1395,6 @@ public:
// "cl.do_oop" on each.
virtual void oop_iterate(ExtendedOopClosure* cl);

// Same as above, restricted to a memory region.
void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);

// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);

@ -237,8 +237,10 @@ void G1GCPhaseTimes::note_gc_end() {
_last_gc_worker_times_ms.verify();
_last_gc_worker_other_times_ms.verify();

_last_redirty_logged_cards_time_ms.verify();
_last_redirty_logged_cards_processed_cards.verify();
if (G1DeferredRSUpdate) {
_last_redirty_logged_cards_time_ms.verify();
_last_redirty_logged_cards_processed_cards.verify();
}
}

void G1GCPhaseTimes::note_string_dedup_fixup_start() {
@ -289,7 +289,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
}

_fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
mtGC, 0, AllocFailStrategy::RETURN_NULL);
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

if (_fine_grain_regions == NULL) {
vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
@ -280,9 +280,6 @@ class AbstractInterpreterGenerator: public StackObj {
address generate_result_handler_for(BasicType type);
address generate_slow_signature_handler();

// entry point generator
address generate_method_entry(AbstractInterpreter::MethodKind kind);

void bang_stack_shadow_pages(bool native_call);

void generate_all();
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -108,7 +108,7 @@ void CppInterpreterGenerator::generate_all() {
}


#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)

{ CodeletMark cm(_masm, "(kind = frame_manager)");
// all non-native method kinds
@ -29,6 +29,7 @@
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
@ -261,7 +262,7 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
// Special intrinsic method?
// Note: This test must come _after_ the test for native methods,
// otherwise we will run into problems with JDK 1.2, see also
// AbstractInterpreterGenerator::generate_method_entry() for
// InterpreterGenerator::generate_method_entry() for
// for details.
switch (m->intrinsic_id()) {
case vmIntrinsics::_dsin : return java_lang_math_sin ;
@ -521,3 +522,50 @@ void AbstractInterpreterGenerator::initialize_method_handle_entries() {
Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
}
}

// Generate method entries
address InterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = generate_native_entry(true); break;
case Interpreter::empty : entry_point = generate_empty_entry(); break;
case Interpreter::accessor : entry_point = generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = generate_Reference_get_entry(); break;
#ifndef CC_INTERP
case Interpreter::java_util_zip_CRC32_update
: entry_point = generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = generate_CRC32_updateBytes_entry(kind); break;
#endif // CC_INTERP
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) {
return entry_point;
}

return generate_normal_entry(synchronized);
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,9 +37,11 @@
class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
NOT_CC_INTERP(TemplateInterpreterGenerator) {

public:
public:

InterpreterGenerator(StubQueue* _code);
InterpreterGenerator(StubQueue* _code);
// entry point generator
address generate_method_entry(AbstractInterpreter::MethodKind kind);

#ifdef TARGET_ARCH_x86
# include "interpreterGenerator_x86.hpp"
@ -364,7 +364,7 @@ void TemplateInterpreterGenerator::generate_all() {

#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind); \
}

// all non-native method kinds
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,9 +59,6 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_safept_entry_for(TosState state, address runtime_entry);
void generate_throw_exception();

// entry point generator
// address generate_method_entry(AbstractInterpreter::MethodKind kind);

// Instruction generation
void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
@ -422,26 +422,23 @@ void Chunk::start_chunk_pool_cleaner_task() {
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
size_t round_size = (sizeof (char *)) - 1;
init_size = (init_size+round_size) & ~round_size;
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
MemTracker::record_new_arena(flag);
set_size_in_bytes(init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
_size_in_bytes = 0;
MemTracker::record_new_arena(flag);
set_size_in_bytes(Chunk::init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
@ -463,7 +460,7 @@ Arena *Arena::move_contents(Arena *copy) {

Arena::~Arena() {
destruct_contents();
NOT_PRODUCT(Atomic::dec(&_instance_count);)
MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
@ -479,21 +476,21 @@ void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant)
// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = os::malloc(size, flags|otArena, CALLER_PC);
void* p = os::malloc(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return os::malloc(size, flags|otArena, CALLER_PC);
return os::malloc(size, flags, CALLER_PC);
#endif
}

@ -518,8 +515,9 @@ void Arena::destruct_contents() {
// change the size
void Arena::set_size_in_bytes(size_t size) {
if (_size_in_bytes != size) {
long delta = (long)(size - size_in_bytes());
_size_in_bytes = size;
MemTracker::record_arena_size((address)this, size);
MemTracker::record_arena_size_change(delta, _flags);
}
}

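With the constructors above, every Arena is born with a memory-type tag and reports its growth to NMT as a signed delta rather than an absolute size. A toy model of that accounting (standalone; MEMFLAGS reduced to a plain enum, MemTracker to an array of counters, not the HotSpot Arena):

#include <cstdio>

enum MEMFLAGS { mtCode, mtCompiler, mtSymbol, mt_count };
static long g_arena_bytes[mt_count] = {0};

class Arena {
  MEMFLAGS _flags;
  long _size_in_bytes;
 public:
  explicit Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
    set_size_in_bytes(4096);  // assumption: one initial chunk
  }
  ~Arena() { set_size_in_bytes(0); }  // hand all bytes back on destruction
  void set_size_in_bytes(long size) {
    if (_size_in_bytes != size) {
      long delta = size - _size_in_bytes;   // signed: grow or shrink
      _size_in_bytes = size;
      g_arena_bytes[_flags] += delta;       // record_arena_size_change()
    }
  }
};

int main() {
  {
    Arena a(mtCompiler);
    a.set_size_in_bytes(8192);              // arena grew by one chunk
    std::printf("mtCompiler: %ld bytes\n", g_arena_bytes[mtCompiler]);
  }
  std::printf("mtCompiler after free: %ld bytes\n", g_arena_bytes[mtCompiler]);
}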
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -133,51 +133,34 @@ class AllocatedObj {


/*
* MemoryType bitmap layout:
* | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
* | memory type | object | reserved |
* | | type | |
* Memory types
*/
enum MemoryType {
// Memory type by sub systems. It occupies lower byte.
mtNone = 0x0000, // undefined
mtClass = 0x0100, // memory class for Java classes
mtThread = 0x0200, // memory for thread objects
mtThreadStack = 0x0300,
mtCode = 0x0400, // memory for generated code
mtGC = 0x0500, // memory for GC
mtCompiler = 0x0600, // memory for compiler
mtInternal = 0x0700, // memory used by VM, but does not belong to
mtJavaHeap = 0x00, // Java heap
mtClass = 0x01, // memory class for Java classes
mtThread = 0x02, // memory for thread objects
mtThreadStack = 0x03,
mtCode = 0x04, // memory for generated code
mtGC = 0x05, // memory for GC
mtCompiler = 0x06, // memory for compiler
mtInternal = 0x07, // memory used by VM, but does not belong to
// any of above categories, and not used for
// native memory tracking
mtOther = 0x0800, // memory not used by VM
mtSymbol = 0x0900, // symbol
mtNMT = 0x0A00, // memory used by native memory tracking
mtChunk = 0x0B00, // chunk that holds content of arenas
mtJavaHeap = 0x0C00, // Java heap
mtClassShared = 0x0D00, // class data sharing
mtTest = 0x0E00, // Test type for verifying NMT
mtTracing = 0x0F00, // memory used for Tracing
mt_number_of_types = 0x000F, // number of memory types (mtDontTrack
mtOther = 0x08, // memory not used by VM
mtSymbol = 0x09, // symbol
mtNMT = 0x0A, // memory used by native memory tracking
mtClassShared = 0x0B, // class data sharing
mtChunk = 0x0C, // chunk that holds content of arenas
mtTest = 0x0D, // Test type for verifying NMT
mtTracing = 0x0E, // memory used for Tracing
mtNone = 0x0F, // undefined
mt_number_of_types = 0x10 // number of memory types (mtDontTrack
// is not included as validate type)
mtDontTrack = 0x0F00, // memory we do not or cannot track
mt_masks = 0x7F00,

// object type mask
otArena = 0x0010, // an arena object
otNMTRecorder = 0x0020, // memory recorder object
ot_masks = 0x00F0
};

#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
typedef MemoryType MEMFLAGS;

#define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))

typedef unsigned short MEMFLAGS;

#if INCLUDE_NMT

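The readable takeaway from this hunk: memory types stop being a bitmask in the upper byte (with a separate object-type nibble) and become plain consecutive indices, directly usable as array subscripts. A small sketch of the difference (values copied from the hunk above; the helper code is illustrative only):

#include <cstdio>

// Old scheme: type lives in bits 8..15, object type in bits 4..7.
enum OldFlags { old_mtGC = 0x0500, old_otArena = 0x0010, old_mt_masks = 0x7F00 };

// New scheme: a memory type is just a small index.
enum MemoryType { mtJavaHeap = 0x00, mtGC = 0x05, mtNone = 0x0F,
                  mt_number_of_types = 0x10 };

static long counters[mt_number_of_types];

int main() {
  // Old: extract the type with a mask before you can use it.
  unsigned old_type = (old_mtGC | old_otArena) & old_mt_masks;
  std::printf("old extracted type: 0x%04x\n", old_type);

  // New: no masking step; the flag is the index into per-type counters.
  counters[mtGC] += 4096;
  std::printf("counters[mtGC] = %ld\n", counters[mtGC]);
}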
@ -189,27 +172,23 @@ const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

// debug build does not inline
#if defined(_NMT_NOINLINE_)
#define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
#define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif

class NativeCallStack;


template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
_NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new(size_t size) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
_NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
throw();
_NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
throw();
void operator delete(void* p);
void operator delete [] (void* p);
};
@ -384,13 +363,15 @@ class Chunk: CHeapObj<mtChunk> {

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
class Arena : public CHeapObj<mtNone> {
protected:
friend class ResourceMark;
friend class HandleMark;
friend class NoHandleMark;
friend class VMStructs;

MEMFLAGS _flags; // Memory tracking flags

Chunk *_first; // First chunk
Chunk *_chunk; // current chunk
char *_hwm, *_max; // High water mark and max in current chunk
@ -418,8 +399,8 @@ protected:
}

public:
Arena();
Arena(size_t init_size);
Arena(MEMFLAGS memflag);
Arena(MEMFLAGS memflag, size_t init_size);
~Arena();
void destruct_contents();
char* hwm() const { return _hwm; }
@ -518,8 +499,6 @@ protected:
static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
static void free_all(char** start, char** end) PRODUCT_RETURN;

// how many arena instances
NOT_PRODUCT(static volatile jint _instance_count;)
private:
// Reset this Arena to empty, access will trigger grow if necessary
void reset(void) {
@ -681,7 +660,7 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
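Several call sites later in this commit (cardTableRS.cpp, collectorPolicy.cpp, heapInspection.cpp) follow from this macro change: they pass CURRENT_PC instead of a literal 0 so the failure-tolerant allocation is still attributed to a call site. A standalone approximation of that RETURN_NULL pattern (hypothetical names; the real path goes through AllocateHeap and NMT):

#include <cstdlib>
#include <cstdio>

enum AllocFailType { EXIT_OOM, RETURN_NULL };

// Stand-in for AllocateHeap(size, flags, CURRENT_PC, RETURN_NULL): a call-site
// label travels with the request so failures and usage can be attributed.
void* allocate_heap(std::size_t size, const char* call_site, AllocFailType mode) {
  void* p = std::malloc(size);
  if (p == nullptr && mode == EXIT_OOM) {
    std::fprintf(stderr, "out of memory at %s\n", call_site);
    std::exit(1);
  }
  std::printf("alloc %zu bytes for %s\n", size, call_site);
  return p;
}

int main() {
  // Mirrors: _generations = NEW_C_HEAP_ARRAY3(..., mtGC, CURRENT_PC, RETURN_NULL);
  void* generations = allocate_heap(4 * sizeof(void*), "initialize_generations",
                                    RETURN_NULL);
  if (generations == nullptr) {
    std::fprintf(stderr, "Unable to allocate gen spec\n");  // graceful path
    return 1;
  }
  std::free(generations);
}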
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
|
||||
// Explicit C-heap memory management
|
||||
|
||||
@ -49,12 +50,10 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
|
||||
#endif
|
||||
|
||||
// allocate using malloc; will fail if no memory available
inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    const NativeCallStack& stack,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  if (pc == 0) {
    pc = CURRENT_PC;
  }
  char* p = (char*) os::malloc(size, flags, pc);
  char* p = (char*) os::malloc(size, flags, stack);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
#endif
@ -63,10 +62,14 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
  }
  return p;
}

inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}

inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
#endif
@ -85,8 +88,22 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {


template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
    address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
    const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
    const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack,
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
@ -94,23 +111,28 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
    const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
    const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, caller_pc);
    const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
  throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
    const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
    const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
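
The hunks above replace the old caller-PC argument with a full NativeCallStack, so a heap allocation can be attributed to a real call chain rather than a single return address. A minimal standalone sketch of the same placement-new pattern (illustrative names only, not HotSpot's actual classes):

// Standalone sketch of a stack-attributed operator new (illustrative only;
// the real CHeapObj and NativeCallStack live in memory/allocation.hpp and
// utilities/nativeCallStack.hpp).
#include <cstdio>
#include <cstdlib>

struct StackTag {                  // stand-in for NativeCallStack
  const void* frames[4];
  int depth;
};

template <int Category>            // stand-in for the MEMFLAGS template argument
class Tracked {
 public:
  static void* operator new(std::size_t size, const StackTag& stack) {
    void* p = std::malloc(size);
    if (p == NULL) std::abort();   // keep the sketch simple: no OOM path
    // a real tracker would record (p, size, Category, stack) here
    std::printf("alloc %zu bytes, category %d, %d frames\n",
                size, Category, stack.depth);
    return p;
  }
  static void operator delete(void* p) { std::free(p); }
};

struct Counter : Tracked<7> { int value; };

int main() {
  StackTag here = { { NULL, NULL, NULL, NULL }, 0 };
  Counter* c = new (here) Counter();   // the allocation carries its stack tag
  delete c;
  return 0;
}
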
@ -56,7 +56,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
#endif
  set_bs(_ct_bs);
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }

@ -176,13 +176,9 @@ size_t CollectorPolicy::compute_heap_alignment() {

  size_t alignment = GenRemSet::max_alignment_constraint();

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation. The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
  if (UseLargePages) {
    // In presence of large pages we have to make sure that our
    // alignment is large page aware
    // alignment is large page aware.
    alignment = lcm(os::large_page_size(), alignment);
  }

@ -909,7 +905,8 @@ void MarkSweepPolicy::initialize_alignments() {
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
                                   AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }

@ -135,7 +135,7 @@ KlassInfoTable::KlassInfoTable(bool need_class_stats) {
  _ref = (HeapWord*) Universe::boolArrayKlassObj();
  _buckets =
    (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
                                    mtInternal, 0, AllocFailStrategy::RETURN_NULL);
                                    mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_buckets != NULL) {
    _size = _num_buckets;
    for (int index = 0; index < _size; index++) {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -103,11 +103,13 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
}

void* MemRegion::operator new(size_t size) throw() {
  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
                               AllocFailStrategy::RETURN_NULL);
}

void* MemRegion::operator new [](size_t size) throw() {
  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
                               AllocFailStrategy::RETURN_NULL);
}
void MemRegion::operator delete(void* p) {
  FreeHeap(p, mtGC);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -49,11 +49,11 @@ class ResourceArea: public Arena {
  debug_only(static int _warned;)   // to suppress multiple warnings

 public:
  ResourceArea() {
  ResourceArea() : Arena(mtThread) {
    debug_only(_nesting = 0;)
  }

  ResourceArea(size_t init_size) : Arena(init_size) {
  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
    debug_only(_nesting = 0;);
  }

@ -64,7 +64,7 @@ public:
    if (UseMallocOnly) {
      // use malloc, but save pointer in res. area for later freeing
      char** save = (char**)internal_malloc_4(sizeof(char*));
      return (*save = (char*)os::malloc(size, mtThread));
      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
    }
#endif
    return (char*)Amalloc(size, alloc_failmode);
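
ResourceArea now passes an explicit memory category (mtThread) to its Arena base, matching the mtCompiler arenas in the C2 hunks below; removing the untagged constructor forces every arena to be charged to an NMT bucket. A standalone sketch of the idea, with invented names (HotSpot's real Arena is declared in memory/allocation.hpp):

// Standalone sketch: an arena type whose constructor demands a memory
// category, so an untagged arena can no longer be created (illustrative).
#include <cstdlib>
#include <vector>

enum MemCategory { mtThread, mtCompiler, mtGC };   // stand-in for MEMFLAGS

class TaggedArena {
 public:
  explicit TaggedArena(MemCategory cat) : _cat(cat), _bytes(0) {}
  ~TaggedArena() {
    for (std::size_t i = 0; i < _blocks.size(); i++) std::free(_blocks[i]);
  }
  void* alloc(std::size_t size) {
    void* p = std::malloc(size);     // a real arena carves from chunks instead
    if (p != NULL) {
      _blocks.push_back(p);
      _bytes += size;                // accounting would be charged to _cat
    }
    return p;
  }
 private:
  MemCategory _cat;
  std::size_t _bytes;
  std::vector<void*> _blocks;
};

int main() {
  TaggedArena resource_area(mtThread);   // the category is now mandatory
  return resource_area.alloc(64) != NULL ? 0 : 1;
}
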
@ -647,6 +647,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
  _printer(IdealGraphPrinter::printer()),
#endif
  _congraph(NULL),
  _comp_arena(mtCompiler),
  _node_arena(mtCompiler),
  _old_arena(mtCompiler),
  _Compile_types(mtCompiler),
  _replay_inline_data(NULL),
  _late_inlines(comp_arena(), 2, 0, NULL),
  _string_late_inlines(comp_arena(), 2, 0, NULL),
@ -954,6 +958,10 @@ Compile::Compile( ciEnv* ci_env,
  _in_dump_cnt(0),
  _printer(NULL),
#endif
  _comp_arena(mtCompiler),
  _node_arena(mtCompiler),
  _old_arena(mtCompiler),
  _Compile_types(mtCompiler),
  _dead_node_list(comp_arena()),
  _dead_node_count(0),
  _congraph(NULL),

@ -503,7 +503,7 @@ int IfNode::is_range_check(Node* &range, Node* &index, jint &offset) {
  jint off = 0;
  if (l->is_top()) {
    return 0;
  } else if (l->is_Add()) {
  } else if (l->Opcode() == Op_AddI) {
    if ((off = l->in(1)->find_int_con(0)) != 0) {
      ind = l->in(2);
    } else if ((off = l->in(2)->find_int_con(0)) != 0) {

@ -2595,7 +2595,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

  if (!is_store) {
    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
    // load value
    switch (type) {
    case T_BOOLEAN:
@ -5096,8 +5097,19 @@ Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * field
    type = Type::get_const_basic_type(bt);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read. Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, loadedField);
  }
  return loadedField;
}

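
These hunks attach acquire semantics to volatile loads when the IR node is built, instead of relying solely on a trailing barrier node. As an analogy only (standard C++ atomics, not C2 IR), the difference between the two orderings:

// Analogy in standard C++ atomics (not C2 IR): an acquire load orders later
// memory operations after it, while a relaxed load carries no ordering.
#include <atomic>

std::atomic<int> ready(0);
int payload = 0;

int read_side() {
  // ~ MemNode::acquire: if ready == 1 is observed, the writer's earlier
  // store to payload is guaranteed to be visible here.
  if (ready.load(std::memory_order_acquire) == 1) {
    return payload;
  }
  // ~ MemNode::unordered: no visibility guarantee is implied.
  return ready.load(std::memory_order_relaxed);
}

int main() { return read_side(); }
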
@ -1083,6 +1083,9 @@ bool Node::has_special_unique_user() const {
  if( this->is_Store() ) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load()) {
    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if( op == Op_AddL ) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;

@ -1381,11 +1381,11 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount
  }
  NamedCounter* c;
  if (tag == NamedCounter::BiasedLockingCounter) {
    c = new BiasedLockingNamedCounter(strdup(st.as_string()));
    c = new BiasedLockingNamedCounter(st.as_string());
  } else if (tag == NamedCounter::RTMLockingCounter) {
    c = new RTMLockingNamedCounter(strdup(st.as_string()));
    c = new RTMLockingNamedCounter(st.as_string());
  } else {
    c = new NamedCounter(strdup(st.as_string()), tag);
    c = new NamedCounter(st.as_string(), tag);
  }

  // atomically add the new counter to the head of the list. We only

@ -75,11 +75,17 @@ private:

 public:
  NamedCounter(const char *n, CounterTag tag = NoTag):
    _name(n),
    _name(n == NULL ? NULL : os::strdup(n)),
    _count(0),
    _next(NULL),
    _tag(tag) {}

  ~NamedCounter() {
    if (_name != NULL) {
      os::free((void*)_name);
    }
  }

  const char * name() const { return _name; }
  int count() const        { return _count; }
  address addr()           { return (address)&_count; }
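
NamedCounter now owns its name: the constructor copies it with os::strdup and the destructor frees it, so call sites (previous hunk) no longer have to strdup at every construction. A standalone sketch of the ownership pattern, with invented names:

// Standalone sketch of constructor-owned string copies (illustrative; the
// real NamedCounter uses os::strdup and os::free).
#include <cstdlib>
#include <cstring>

class NamedThing {
 public:
  explicit NamedThing(const char* n)
      // copy on the way in: the caller's buffer may be temporary
      : _name(n == NULL ? NULL : dup(n)) {}
  ~NamedThing() { std::free(_name); }        // free(NULL) is a no-op
  const char* name() const { return _name; }
 private:
  static char* dup(const char* s) {
    std::size_t len = std::strlen(s) + 1;
    char* p = (char*)std::malloc(len);
    if (p != NULL) std::memcpy(p, s, len);
    return p;
  }
  // copying disabled so two objects never free the same buffer
  NamedThing(const NamedThing&);
  NamedThing& operator=(const NamedThing&);
  char* _name;
};

int main() {
  char buf[16];
  std::strcpy(buf, "counter@42");
  NamedThing t(buf);            // safe even though buf is stack-allocated
  return t.name()[0] == 'c' ? 0 : 1;
}
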
@ -265,7 +265,7 @@ void Type::Initialize_shared(Compile* current) {
  // locking.

  Arena* save = current->type_arena();
  Arena* shared_type_arena = new (mtCompiler)Arena();
  Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);

  current->set_type_arena(shared_type_arena);
  _shared_type_dict =

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -222,10 +222,17 @@
# include "runtime/vmThread.hpp"
# include "runtime/vm_operations.hpp"
# include "runtime/vm_version.hpp"
# include "services/allocationSite.hpp"
# include "services/lowMemoryDetector.hpp"
# include "services/mallocTracker.hpp"
# include "services/memBaseline.hpp"
# include "services/memoryPool.hpp"
# include "services/memoryService.hpp"
# include "services/memoryUsage.hpp"
# include "services/memReporter.hpp"
# include "services/memTracker.hpp"
# include "services/nmtCommon.hpp"
# include "services/virtualMemoryTracker.hpp"
# include "utilities/accessFlags.hpp"
# include "utilities/array.hpp"
# include "utilities/bitMap.hpp"
@ -240,6 +247,7 @@
# include "utilities/hashtable.hpp"
# include "utilities/histogram.hpp"
# include "utilities/macros.hpp"
# include "utilities/nativeCallStack.hpp"
# include "utilities/numberSeq.hpp"
# include "utilities/ostream.hpp"
# include "utilities/preserveException.hpp"

@ -74,6 +74,7 @@
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "utilities/defaultStream.hpp"
@ -2697,6 +2698,7 @@ static char* get_bad_address() {
    if (bad_address != NULL) {
      os::protect_memory(bad_address, size, os::MEM_PROT_READ,
                         /*is_committed*/false);
      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
    }
  }
  return bad_address;
@ -3857,6 +3859,7 @@ void TestOldSize_test();
void TestKlass_test();
void TestBitMap_test();
void TestAsUtf8();
void Test_linked_list();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
@ -3887,6 +3890,7 @@ void execute_internal_vm_tests() {
    run_unit_test(TestBitMap_test());
    run_unit_test(TestAsUtf8());
    run_unit_test(ObjectMonitor::sanity_checks());
    run_unit_test(Test_linked_list());
#if INCLUDE_VM_STRUCTS
    run_unit_test(VMStructs::test());
#endif

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -316,6 +316,7 @@ void JvmtiManageCapabilities::update() {
    avail.can_generate_frame_pop_events ||
    avail.can_generate_method_entry_events ||
    avail.can_generate_method_exit_events;
#ifdef ZERO
  bool enter_all_methods =
    interp_events ||
    avail.can_generate_breakpoint_events;
@ -324,6 +325,7 @@ void JvmtiManageCapabilities::update() {
    UseFastEmptyMethods = false;
    UseFastAccessorMethods = false;
  }
#endif // ZERO

  if (avail.can_generate_breakpoint_events) {
    RewriteFrequentPairs = false;

@ -52,8 +52,10 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef INCLUDE_NMT
#if INCLUDE_NMT
#include "services/mallocSiteTable.hpp"
#include "services/memTracker.hpp"
#include "utilities/nativeCallStack.hpp"
#endif // INCLUDE_NMT

#include "compiler/compileBroker.hpp"
@ -255,14 +257,18 @@ WB_END
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
  jlong addr = 0;

  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
  }

  return addr;
WB_END

// Alloc memory with a pseudo call stack. The test can create pseudo malloc
// allocation sites to stress the malloc tracking.
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
  address pc = (address)(size_t)pseudo_stack;
  NativeCallStack stack(&pc, 1);
  return (jlong)os::malloc(size, mtTest, stack);
WB_END

// Free the memory allocated by NMTAllocTest
WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
  os::free((void*)(uintptr_t)mem, mtTest);
@ -271,10 +277,8 @@ WB_END
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
  jlong addr = 0;

  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
    addr = (jlong)(uintptr_t)os::reserve_memory(size);
    MemTracker::record_virtual_memory_type((address)addr, mtTest);
  }

  return addr;
WB_END
@ -293,20 +297,20 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
  os::release_memory((char *)(uintptr_t)addr, size);
WB_END

// Block until the current generation of NMT data has been merged, used to reliably test the NMT feature
WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))

  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
    return false;
  }

  return MemTracker::wbtest_wait_for_data_merge();
WB_END

WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
  return MemTracker::tracking_level() == MemTracker::NMT_detail;
  return MemTracker::tracking_level() == NMT_detail;
WB_END

WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
  address pc = (address)1;
  for (jlong index = 0; index < num; index ++) {
    NativeCallStack stack(&pc, 1);
    os::malloc(0, mtTest, stack);
    pc += MallocSiteTable::hash_buckets();
  }
WB_END

#endif // INCLUDE_NMT

static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@ -597,6 +601,15 @@ WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
  return NULL;
WB_END

WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
  uintx result;
  if (GetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAt)) {
    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
    return longBox(thread, env, result);
  }
  return NULL;
WB_END

WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
  double result;
  if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
@ -637,6 +650,11 @@ WB_ENTRY(void, WB_SetUint64VMFlag(JNIEnv* env, jobject o, jstring name, jlong va
  SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
WB_END

WB_ENTRY(void, WB_SetSizeTVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
  size_t result = value;
  SetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAtPut);
WB_END

WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
  double result = value;
  SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
@ -843,12 +861,13 @@ static JNINativeMethod methods[] = {
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
  {CC"NMTMalloc",                CC"(J)J",  (void*)&WB_NMTMalloc },
  {CC"NMTMallocWithPseudoStack", CC"(JI)J", (void*)&WB_NMTMallocWithPseudoStack},
  {CC"NMTFree",                  CC"(J)V",  (void*)&WB_NMTFree },
  {CC"NMTReserveMemory",         CC"(J)J",  (void*)&WB_NMTReserveMemory },
  {CC"NMTCommitMemory",          CC"(JJ)V", (void*)&WB_NMTCommitMemory },
  {CC"NMTUncommitMemory",        CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
  {CC"NMTReleaseMemory",         CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
  {CC"NMTWaitForDataMerge",      CC"()Z",   (void*)&WB_NMTWaitForDataMerge},
  {CC"NMTOverflowHashBucket",    CC"(J)V",  (void*)&WB_NMTOverflowHashBucket},
  {CC"NMTIsDetailSupported",     CC"()Z",   (void*)&WB_NMTIsDetailSupported},
#endif // INCLUDE_NMT
  {CC"deoptimizeAll",            CC"()V",   (void*)&WB_DeoptimizeAll },
@ -880,6 +899,7 @@ static JNINativeMethod methods[] = {
  {CC"setIntxVMFlag",    CC"(Ljava/lang/String;J)V", (void*)&WB_SetIntxVMFlag},
  {CC"setUintxVMFlag",   CC"(Ljava/lang/String;J)V", (void*)&WB_SetUintxVMFlag},
  {CC"setUint64VMFlag",  CC"(Ljava/lang/String;J)V", (void*)&WB_SetUint64VMFlag},
  {CC"setSizeTVMFlag",   CC"(Ljava/lang/String;J)V", (void*)&WB_SetSizeTVMFlag},
  {CC"setDoubleVMFlag",  CC"(Ljava/lang/String;D)V", (void*)&WB_SetDoubleVMFlag},
  {CC"setStringVMFlag",  CC"(Ljava/lang/String;Ljava/lang/String;)V",
                                                     (void*)&WB_SetStringVMFlag},
@ -891,6 +911,8 @@ static JNINativeMethod methods[] = {
                                                     (void*)&WB_GetUintxVMFlag},
  {CC"getUint64VMFlag",  CC"(Ljava/lang/String;)Ljava/lang/Long;",
                                                     (void*)&WB_GetUint64VMFlag},
  {CC"getSizeTVMFlag",   CC"(Ljava/lang/String;)Ljava/lang/Long;",
                                                     (void*)&WB_GetSizeTVMFlag},
  {CC"getDoubleVMFlag",  CC"(Ljava/lang/String;)Ljava/lang/Double;",
                                                     (void*)&WB_GetDoubleVMFlag},
  {CC"getStringVMFlag",  CC"(Ljava/lang/String;)Ljava/lang/String;",
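
WB_NMTMallocWithPseudoStack fabricates a one-frame NativeCallStack from an integer, and WB_NMTOverflowHashBucket steps that integer by the bucket count so every allocation lands in the same malloc-site bucket. A standalone model of the trick (invented names and an assumed bucket count, not HotSpot's MallocSiteTable):

// Standalone model of a pseudo call stack hashing into site buckets
// (illustrative only).
#include <cstdint>
#include <cstdio>

const int kBuckets = 511;   // assumed bucket count, for illustration

struct OneFrameStack {
  uintptr_t pc;             // a fabricated "program counter"
  explicit OneFrameStack(uintptr_t p) : pc(p) {}
  int bucket() const { return (int)(pc % kBuckets); }
};

int main() {
  // Stepping pc by the bucket count lands every allocation in the same
  // bucket, which is exactly how a test can force bucket-chain overflow.
  uintptr_t pc = 1;
  for (int i = 0; i < 3; i++) {
    OneFrameStack stack(pc);
    std::printf("pc=%lu -> bucket %d\n", (unsigned long)pc, stack.bucket());
    pc += kBuckets;
  }
  return 0;
}
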
@ -300,6 +300,11 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
  { "UseNewReflection",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
  { "ReflectionWrapResolutionErrors", JDK_Version::jdk(9), JDK_Version::jdk(10) },
  { "VerifyReflectionBytecodes",      JDK_Version::jdk(9), JDK_Version::jdk(10) },
  { "AutoShutdownNMT",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifndef ZERO
  { "UseFastAccessorMethods",         JDK_Version::jdk(9), JDK_Version::jdk(10) },
  { "UseFastEmptyMethods",            JDK_Version::jdk(9), JDK_Version::jdk(10) },
#endif // ZERO
  { NULL, JDK_Version(0), JDK_Version(0) }
};

@ -686,6 +691,10 @@ static bool set_numeric_flag(char* name, char* value, Flag::Flags origin) {
  if (!is_neg && CommandLineFlags::uint64_tAtPut(name, &uint64_t_v, origin)) {
    return true;
  }
  size_t size_t_v = (size_t) v;
  if (!is_neg && CommandLineFlags::size_tAtPut(name, &size_t_v, origin)) {
    return true;
  }
  return false;
}

@ -799,7 +808,7 @@ void Arguments::add_string(char*** bldarray, int* count, const char* arg) {
  } else {
    *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtInternal);
  }
  (*bldarray)[*count] = strdup(arg);
  (*bldarray)[*count] = os::strdup_check_oom(arg);
  *count = new_count;
}

@ -1070,16 +1079,6 @@ void Arguments::set_mode_flags(Mode mode) {
  UseCompiler    = true;
  UseLoopCounter = true;

#ifndef ZERO
  // Turn these off for mixed and comp. Leave them on for Zero.
  if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
    UseFastAccessorMethods = (mode == _int);
  }
  if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
    UseFastEmptyMethods = (mode == _int);
  }
#endif

  // Default values may be platform/compiler dependent -
  // use the saved values
  ClipInlining = Arguments::_ClipInlining;
@ -1885,7 +1884,7 @@ void Arguments::process_java_compiler_argument(char* arg) {
}

void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
  _sun_java_launcher = strdup(launcher);
  _sun_java_launcher = os::strdup_check_oom(launcher);
}

bool Arguments::created_by_java_launcher() {
@ -2388,7 +2387,7 @@ bool Arguments::check_vm_args_consistency() {

  if (PrintNMTStatistics) {
#if INCLUDE_NMT
    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
    if (MemTracker::tracking_level() == NMT_off) {
#endif // INCLUDE_NMT
      warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
      PrintNMTStatistics = false;
@ -2995,7 +2994,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      // Redirect GC output to the file. -Xloggc:<filename>
      // ostream_init_log(), when called will use this filename
      // to initialize a fileStream.
      _gc_log_filename = strdup(tail);
      _gc_log_filename = os::strdup_check_oom(tail);
      if (!is_filename_valid(_gc_log_filename)) {
        jio_fprintf(defaultStream::output_stream(),
          "Invalid file name for use with -Xloggc: Filename can only contain the "
@ -3598,15 +3597,24 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
    CommandLineFlags::printFlags(tty, false);
    vm_exit(0);
  }
  if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
#if INCLUDE_NMT
    MemTracker::init_tracking_options(tail);
#else
    jio_fprintf(defaultStream::error_stream(),
      "Native Memory Tracking is not supported in this VM\n");
    return JNI_ERR;
#endif
  if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
    // The launcher did not setup nmt environment variable properly.
    // if (!MemTracker::check_launcher_nmt_support(tail)) {
    //   warning("Native Memory Tracking did not setup properly, using wrong launcher?");
    // }

    // Verify if nmt option is valid.
    if (MemTracker::verify_nmt_option()) {
      // Late initialization, still in single-threaded mode.
      if (MemTracker::tracking_level() >= NMT_summary) {
        MemTracker::init();
      }
    } else {
      vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
    }
  }
#endif

#ifndef PRODUCT
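
The -XX:NativeMemoryTracking handling now validates the level and initializes the tracker while the VM is still single-threaded. A standalone sketch of the level parsing (illustrative; the real parsing lives elsewhere in the NMT code, not in this function):

// Standalone sketch of parsing an NMT level string (illustrative only).
#include <cstdio>
#include <cstring>

enum NMT_TrackingLevel { NMT_unknown, NMT_off, NMT_summary, NMT_detail };

static NMT_TrackingLevel parse_nmt_level(const char* s) {
  if (s == NULL) return NMT_unknown;
  if (std::strcmp(s, "=off") == 0)     return NMT_off;
  if (std::strcmp(s, "=summary") == 0) return NMT_summary;
  if (std::strcmp(s, "=detail") == 0)  return NMT_detail;
  return NMT_unknown;   // caller reports a syntax error and exits
}

int main() {
  const char* tail = "=summary";   // what follows -XX:NativeMemoryTracking
  NMT_TrackingLevel level = parse_nmt_level(tail);
  if (level == NMT_unknown) {
    std::fprintf(stderr,
        "Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]\n");
    return 1;
  }
  return level >= NMT_summary ? 0 : 2;   // init the tracker only when enabled
}
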
@ -629,10 +629,16 @@ class vmNode : public ProfilerNode {
  }

  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
    _name = name;
    _name = os::strdup(name);
    update(where);
  }

  ~vmNode() {
    if (_name != NULL) {
      os::free((void*)_name);
    }
  }

  const char *name() const { return _name; }
  bool is_compiled() const { return true; }

@ -784,7 +790,7 @@ void ThreadProfiler::vm_update(const char* name, TickPosition where) {
  assert(index >= 0, "Must be positive");
  // Note that we call strdup below since the symbol may be resource allocated
  if (!table[index]) {
    table[index] = new (this) vmNode(os::strdup(name), where);
    table[index] = new (this) vmNode(name, where);
  } else {
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
@ -794,7 +800,7 @@ void ThreadProfiler::vm_update(const char* name, TickPosition where) {
      }
      prev = node;
    }
    prev->set_next(new (this) vmNode(os::strdup(name), where));
    prev->set_next(new (this) vmNode(name, where));
  }
}

@ -131,6 +131,19 @@ void Flag::set_uint64_t(uint64_t value) {
  *((uint64_t*) _addr) = value;
}

bool Flag::is_size_t() const {
  return strcmp(_type, "size_t") == 0;
}

size_t Flag::get_size_t() const {
  return *((size_t*) _addr);
}

void Flag::set_size_t(size_t value) {
  check_writable();
  *((size_t*) _addr) = value;
}

bool Flag::is_double() const {
  return strcmp(_type, "double") == 0;
}
@ -306,6 +319,9 @@ void Flag::print_on(outputStream* st, bool withComments) {
  if (is_uint64_t()) {
    st->print("%-16lu", get_uint64_t());
  }
  if (is_size_t()) {
    st->print(SIZE_FORMAT_W(-16), get_size_t());
  }
  if (is_double()) {
    st->print("%-16f", get_double());
  }
@ -395,6 +411,8 @@ void Flag::print_as_flag(outputStream* st) {
    st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
  } else if (is_uint64_t()) {
    st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
  } else if (is_size_t()) {
    st->print("-XX:%s=" SIZE_FORMAT, _name, get_size_t());
  } else if (is_double()) {
    st->print("-XX:%s=%f", _name, get_double());
  } else if (is_ccstr()) {
@ -723,6 +741,34 @@ void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t va
  faddr->set_origin(origin);
}

bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_size_t()) return false;
  *value = result->get_size_t();
  return true;
}

bool CommandLineFlags::size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_size_t()) return false;
  size_t old_value = result->get_size_t();
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
  result->set_size_t(*value);
  *value = old_value;
  result->set_origin(origin);
  return true;
}

void CommandLineFlagsEx::size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_size_t(), "wrong flag type");
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_size_t(), value, origin);
  faddr->set_size_t(value);
  faddr->set_origin(origin);
}

bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
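
size_tAtPut follows the same swap contract as the other AtPut overloads: the new value is stored into the flag, and the previous value is handed back through the same pointer. A standalone sketch of that contract:

// Standalone sketch of the AtPut swap contract (illustrative; the real
// implementation goes through Flag::find_flag and per-type checks).
#include <cstdio>

static size_t g_array_allocator_malloc_limit = (size_t)-1;   // some size_t flag

// Writes *value into the flag and returns the previous value via *value.
static bool size_t_at_put(size_t* flag_addr, size_t* value) {
  if (flag_addr == NULL || value == NULL) return false;
  size_t old_value = *flag_addr;
  *flag_addr = *value;
  *value = old_value;      // the caller gets the old setting back
  return true;
}

int main() {
  size_t v = 64 * 1024;
  if (size_t_at_put(&g_array_allocator_malloc_limit, &v)) {
    // v now holds the previous limit; the flag holds 64K
    std::printf("old=%zu new=%zu\n", v, g_array_allocator_malloc_limit);
  }
  return 0;
}
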
@ -275,6 +275,10 @@ struct Flag {
  uint64_t get_uint64_t() const;
  void set_uint64_t(uint64_t value);

  bool is_size_t() const;
  size_t get_size_t() const;
  void set_size_t(size_t value);

  bool is_double() const;
  double get_double() const;
  void set_double(double value);
@ -350,7 +354,6 @@ class UIntFlagSetting {
  ~UIntFlagSetting() { *flag = val; }
};


class DoubleFlagSetting {
  double val;
  double* flag;
@ -359,6 +362,14 @@ class DoubleFlagSetting {
  ~DoubleFlagSetting() { *flag = val; }
};

class SizeTFlagSetting {
  size_t val;
  size_t* flag;
 public:
  SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
  ~SizeTFlagSetting() { *flag = val; }
};


class CommandLineFlags {
 public:
@ -377,6 +388,11 @@ class CommandLineFlags {
  static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
  static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }

  static bool size_tAt(const char* name, size_t len, size_t* value);
  static bool size_tAt(const char* name, size_t* value) { return size_tAt(name, strlen(name), value); }
  static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
  static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }

  static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
  static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
  static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
@ -945,11 +961,6 @@ class CommandLineFlags {
  diagnostic(bool, PrintNMTStatistics, false, \
          "Print native memory tracking summary data if it is on") \
          \
  diagnostic(bool, AutoShutdownNMT, true, \
          "Automatically shutdown native memory tracking under stress " \
          "situations. When set to false, native memory tracking tries to " \
          "stay alive at the expense of JVM performance") \
          \
  diagnostic(bool, LogCompilation, false, \
          "Log compilation activity in detail to LogFile") \
          \
@ -2789,12 +2800,6 @@ class CommandLineFlags {
  product(bool, UseLoopCounter, true, \
          "Increment invocation counter on backward branch") \
          \
  product(bool, UseFastEmptyMethods, true, \
          "Use fast method entry code for empty methods") \
          \
  product(bool, UseFastAccessorMethods, true, \
          "Use fast method entry code for accessor methods") \
          \
  product_pd(bool, UseOnStackReplacement, \
          "Use on stack replacement, calls runtime if invoc. counter " \
          "overflows in loop") \
@ -3883,8 +3888,8 @@ class CommandLineFlags {
  diagnostic(ccstr, SharedArchiveFile, NULL, \
          "Override the default location of the CDS archive file") \
          \
  experimental(uintx, ArrayAllocatorMallocLimit, \
          SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \
  experimental(size_t, ArrayAllocatorMallocLimit, \
          SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \
          "Allocation less than this value will be allocated " \
          "using malloc. Larger allocations will use mmap.") \
          \
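
SizeTFlagSetting is a scoped save/override/restore guard with the same shape as UIntFlagSetting and DoubleFlagSetting above. A standalone sketch of the pattern written once as a template (HotSpot spells out one class per flag type instead):

// Standalone sketch of the scoped flag-setting pattern (illustrative).
#include <cstdio>

template <typename T>
class FlagSetting {
 public:
  FlagSetting(T& flag, T new_value) : _flag(&flag), _saved(flag) {
    flag = new_value;                    // override for the enclosing scope
  }
  ~FlagSetting() { *_flag = _saved; }    // restore on scope exit
 private:
  T* _flag;
  T  _saved;
};

static size_t MallocLimit = (size_t)-1;

int main() {
  {
    FlagSetting<size_t> guard(MallocLimit, 64 * 1024);
    std::printf("inside scope: %zu\n", MallocLimit);    // 65536
  }
  std::printf("after scope:  %zu\n", MallocLimit);      // restored
  return 0;
}
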
@ -200,6 +200,7 @@ class CommandLineFlagsEx : CommandLineFlags {
  static void intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
  static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
  static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
  static void size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin);
  static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
  // Contract: Flag will make private copy of the incoming value
  static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ class HandleArea: public Arena {
  HandleArea* _prev;          // link to outer (older) area
 public:
  // Constructor
  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
  HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
    debug_only(_handle_mark_nesting    = 0);
    debug_only(_no_handle_mark_nesting = 0);
    _prev = prev;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -34,8 +34,10 @@
#include "runtime/init.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"


// Initialization done by VM thread in vm_init_globals()
void check_ThreadShadow();
void eventlog_init();
@ -131,6 +133,12 @@ jint init_globals() {
  javaClasses_init();   // must happen after vtable initialization
  stubRoutines_init2(); // note: StubRoutines need 2-phase init

#if INCLUDE_NMT
  // The Solaris stack is walkable only after stubRoutines are set up.
  // On other platforms, the stack is always walkable.
  NMT_stack_walkable = true;
#endif // INCLUDE_NMT

  // All the flags that get adjusted by VM_Version_init and os::init_2
  // have been set so dump the flags now.
  if (PrintFlagsFinal) {

@ -57,7 +57,6 @@
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
@ -349,12 +348,7 @@ void print_statistics() {
#endif // ENABLE_ZAP_DEAD_LOCALS
  // Native memory tracking data
  if (PrintNMTStatistics) {
    if (MemTracker::is_on()) {
      BaselineTTYOutputer outputer(tty);
      MemTracker::print_memory_usage(outputer, K, false);
    } else {
      tty->print_cr("%s", MemTracker::reason());
    }
    MemTracker::final_report(tty);
  }
}

@ -390,12 +384,7 @@ void print_statistics() {

  // Native memory tracking data
  if (PrintNMTStatistics) {
    if (MemTracker::is_on()) {
      BaselineTTYOutputer outputer(tty);
      MemTracker::print_memory_usage(outputer, K, false);
    } else {
      tty->print_cr("%s", MemTracker::reason());
    }
    MemTracker::final_report(tty);
  }
}

@ -544,10 +533,6 @@ void before_exit(JavaThread * thread) {
    BeforeExit_lock->notify_all();
  }

  // Shutdown NMT before exit. Otherwise,
  // it will run into trouble when system destroys static variables.
  MemTracker::shutdown(MemTracker::NMT_normal);

  if (VerifyStringTableAtExit) {
    int fail_cnt = 0;
    {

@ -52,6 +52,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/nmtCommon.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
#include "utilities/defaultStream.hpp"
@ -516,6 +517,14 @@ char *os::strdup(const char *str, MEMFLAGS flags) {
  return dup_str;
}

char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
  char* p = os::strdup(str, flags);
  if (p == NULL) {
    vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
  }
  return p;
}


#define paranoid 0  /* only set to 1 if you suspect checking code has bug */

@ -553,7 +562,11 @@ static u_char* testMalloc(size_t alloc_size) {
  return ptr;
}

void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
void* os::malloc(size_t size, MEMFLAGS flags) {
  return os::malloc(size, flags, CALLER_PC);
}

void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));

@ -579,11 +592,15 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
    size = 1;
  }

  // NMT support
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t nmt_header_size = MemTracker::malloc_header_size(level);

#ifndef ASSERT
  const size_t alloc_size = size;
  const size_t alloc_size = size + nmt_header_size;
#else
  const size_t alloc_size = GuardedMemory::get_total_size(size);
  if (size > alloc_size) { // Check for rollover.
  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
  if (size + nmt_header_size > alloc_size) { // Check for rollover.
    return NULL;
  }
#endif
@ -602,7 +619,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
    return NULL;
  }
  // Wrap memory with guard
  GuardedMemory guarded(ptr, size);
  GuardedMemory guarded(ptr, size + nmt_header_size);
  ptr = guarded.get_user_ptr();
#endif
  if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
@ -615,48 +632,50 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
  }

  // we do not track guard memory
  MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);

  return ptr;
  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}

void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
  return os::realloc(memblock, size, flags, CALLER_PC);
}

void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
#ifndef ASSERT
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
  MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  void* ptr = ::realloc(memblock, size);
  if (ptr != NULL) {
    tkr.record((address)memblock, (address)ptr, size, memflags,
               caller == 0 ? CALLER_PC : caller);
  } else {
    tkr.discard();
  }
  return ptr;
  // NMT support
  void* membase = MemTracker::record_free(memblock);
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t nmt_header_size = MemTracker::malloc_header_size(level);
  void* ptr = ::realloc(membase, size + nmt_header_size);
  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
#else
  if (memblock == NULL) {
    return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
    return os::malloc(size, memflags, stack);
  }
  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
    tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
    breakpoint();
  }
  verify_memory(memblock);
  // NMT support
  void* membase = MemTracker::malloc_base(memblock);
  verify_memory(membase);
  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
  if (size == 0) {
    return NULL;
  }
  // always move the block
  void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
  void* ptr = os::malloc(size, memflags, stack);
  if (PrintMalloc) {
    tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
  }
  // Copy to new memory if malloc didn't fail
  if ( ptr != NULL ) {
    GuardedMemory guarded(memblock);
    memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
    if (paranoid) verify_memory(ptr);
    GuardedMemory guarded(MemTracker::malloc_base(memblock));
    // Guard's user data contains NMT header
    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
    memcpy(ptr, memblock, MIN2(size, memblock_size));
    if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
    if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
      tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
      breakpoint();
@ -669,7 +688,6 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller


void os::free(void *memblock, MEMFLAGS memflags) {
  address trackp = (address) memblock;
  NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
  if (memblock == NULL) return;
@ -677,20 +695,22 @@ void os::free(void *memblock, MEMFLAGS memflags) {
    if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
    breakpoint();
  }
  verify_memory(memblock);
  void* membase = MemTracker::record_free(memblock);
  verify_memory(membase);
  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());

  GuardedMemory guarded(memblock);
  GuardedMemory guarded(membase);
  size_t size = guarded.get_user_size();
  inc_stat_counter(&free_bytes, size);
  memblock = guarded.release_for_freeing();
  membase = guarded.release_for_freeing();
  if (PrintMalloc && tty != NULL) {
    fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
    fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
  }
  ::free(membase);
#else
  void* membase = MemTracker::record_free(memblock);
  ::free(membase);
#endif
  MemTracker::record_free(trackp, memflags);

  ::free(memblock);
}

void os::init_random(long initval) {
@ -1404,7 +1424,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  }

  return result;
@ -1414,7 +1434,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
    MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    MemTracker::record_virtual_memory_type((address)result, flags);
  }

@ -1424,7 +1444,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
  char* result = pd_attempt_reserve_memory_at(bytes, addr);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  }
  return result;
}
@ -1464,23 +1484,29 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
}

bool os::uncommit_memory(char* addr, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  bool res = pd_uncommit_memory(addr, bytes);
  if (res) {
    tkr.record((address)addr, bytes);
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
    res = pd_uncommit_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    tkr.discard();
    res = pd_uncommit_memory(addr, bytes);
  }
  return res;
}

bool os::release_memory(char* addr, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  bool res = pd_release_memory(addr, bytes);
  if (res) {
    tkr.record((address)addr, bytes);
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    tkr.discard();
    res = pd_release_memory(addr, bytes);
  }
  return res;
}
@ -1491,7 +1517,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
    bool allow_exec) {
  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  }
  return result;
}
@ -1504,12 +1530,15 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
}

bool os::unmap_memory(char *addr, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  bool result = pd_unmap_memory(addr, bytes);
  if (result) {
    tkr.record((address)addr, bytes);
  bool result;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    result = pd_unmap_memory(addr, bytes);
    if (result) {
      tkr.record((address)addr, bytes);
    }
  } else {
    tkr.discard();
    result = pd_unmap_memory(addr, bytes);
  }
  return result;
}
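
With this change os::malloc over-allocates by a tracking-level-dependent NMT header, hands the block to MemTracker::record_malloc, and returns the user pointer; record_free recovers the real base before ::free. A standalone model of the header arithmetic (invented header layout, not HotSpot's):

// Standalone model of a header-prefixed malloc (illustrative only).
#include <cassert>
#include <cstdlib>
#include <cstring>

struct AllocHeader {        // stand-in for the NMT malloc header
  size_t   size;
  unsigned category;
};

static void* tracked_malloc(size_t size, unsigned category) {
  void* base = std::malloc(size + sizeof(AllocHeader));   // over-allocate
  if (base == NULL) return NULL;
  AllocHeader* h = (AllocHeader*)base;
  h->size = size;
  h->category = category;
  return h + 1;                       // user pointer sits after the header
}

static void tracked_free(void* user_ptr) {
  if (user_ptr == NULL) return;
  AllocHeader* h = (AllocHeader*)user_ptr - 1;   // recover the base pointer
  std::free(h);                                  // free the real allocation
}

int main() {
  char* p = (char*)tracked_malloc(16, 7);
  assert(p != NULL);
  std::memset(p, 0, 16);              // the full user size is writable
  tracked_free(p);
  return 0;
}
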
@ -65,6 +65,8 @@ class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;

template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
@ -96,9 +98,11 @@ const bool ExecMem = true;
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

class MallocTracker;

class os: AllStatic {
  friend class VMStructs;

  friend class MallocTracker;
 public:
  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

@ -160,7 +164,10 @@ class os: AllStatic {
  // Override me as needed
  static int file_name_strcmp(const char* s1, const char* s2);

  // get/unset environment variable
  static bool getenv(const char* name, char* buffer, int len);
  static bool unsetenv(const char* name);

  static bool have_special_privileges();

  static jlong javaTimeMillis();
@ -207,8 +214,13 @@ class os: AllStatic {

  // Interface for detecting multiprocessor system
  static inline bool is_MP() {
#if !INCLUDE_NMT
    assert(_processor_count > 0, "invalid processor count");
    return _processor_count > 1 || AssumeMP;
#else
    // NMT needs atomic operations before this initialization.
    return true;
#endif
  }
  static julong available_memory();
  static julong physical_memory();
@ -635,15 +647,25 @@ class os: AllStatic {
  static void* thread_local_storage_at(int index);
  static void free_thread_local_storage(int index);

  // Stack walk
  static address get_caller_pc(int n = 0);
  // Retrieve native stack frames.
  // Parameters:
  //   stack:  an array in which to store the stack pointers.
  //   size:   the size of the above array.
  //   toSkip: the number of stack frames to skip at the beginning.
  // Return: the number of stack frames captured.
  static int get_native_stack(address* stack, int size, int toSkip = 0);

  // General allocation (must be MT-safe)
  static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
  static void* malloc  (size_t size, MEMFLAGS flags);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);

  static void  free    (void *memblock, MEMFLAGS flags = mtNone);
  static bool  check_heap(bool force = false);   // verify C heap integrity
  static char* strdup(const char *, MEMFLAGS flags = mtInternal);   // Like strdup
  // Like strdup, but exit VM when strdup() returns NULL
  static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);

#ifndef PRODUCT
  static julong num_mallocs;   // # of calls to malloc/realloc
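
get_native_stack fills a caller-supplied fixed-size array and returns the number of frames captured; the NativeCallStack-based malloc interface above is built on top of it. A standalone model of that capture contract (fake frames and an invented helper, since the real function walks actual native frames):

// Standalone model of the fixed-depth stack capture contract (illustrative).
#include <cstddef>

typedef void* address;

// Fills 'stack' with up to 'size' fake frames, skipping 'toSkip' first.
static int get_native_stack_model(address* stack, int size, int toSkip) {
  static address fake_frames[] = {
    (address)0x1000, (address)0x2000, (address)0x3000, (address)0x4000
  };
  const int available = (int)(sizeof(fake_frames) / sizeof(fake_frames[0]));
  int captured = 0;
  for (int i = toSkip; i < available && captured < size; i++) {
    stack[captured++] = fake_frames[i];
  }
  for (int i = captured; i < size; i++) {
    stack[i] = NULL;                 // callers rely on a zeroed tail
  }
  return captured;
}

int main() {
  address frames[4];
  int depth = get_native_stack_model(frames, 4, 1);  // skip our own frame
  return depth == 3 ? 0 : 1;
}
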
@ -52,7 +52,6 @@
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
@ -527,10 +526,6 @@ void SafepointSynchronize::do_cleanup_tasks() {
    TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
    ClassLoaderDataGraph::purge_if_needed();
  }

  if (MemTracker::is_on()) {
    MemTracker::sync();
  }
}

@ -297,8 +297,7 @@ void Thread::record_stack_base_and_size() {
#if INCLUDE_NMT
  // record thread's native stack, stack grows downward
  address stack_low_addr = stack_base() - stack_size();
  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
                                  CURRENT_PC);
  MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}

@ -316,7 +315,7 @@ Thread::~Thread() {
#if INCLUDE_NMT
  if (_stack_base != NULL) {
    address low_stack_addr = stack_base() - stack_size();
    MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
    MemTracker::release_thread_stack(low_stack_addr, stack_size());
#ifdef ASSERT
    set_stack_base(NULL);
#endif
@ -1425,9 +1424,6 @@ void JavaThread::initialize() {
  set_monitor_chunks(NULL);
  set_next(NULL);
  set_thread_state(_thread_new);
#if INCLUDE_NMT
  set_recorder(NULL);
#endif
  _terminated = _not_terminated;
  _privileged_stack_top = NULL;
  _array_for_gc = NULL;
@ -1503,7 +1499,6 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
    _jni_attach_state = _not_attaching_via_jni;
  }
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
  _safepoint_visible = false;
}

bool JavaThread::reguard_stack(address cur_sp) {
@ -1566,7 +1561,6 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
  thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
                                                     os::java_thread;
  os::create_thread(this, thr_type, stack_sz);
  _safepoint_visible = false;
  // The _osthread may be NULL here because we ran out of memory (too many threads active).
  // We need to throw an OutOfMemoryError - however we cannot do this here because the caller
  // may hold a lock and all locks must be unlocked before throwing the exception (throwing
@ -1584,13 +1578,6 @@ JavaThread::~JavaThread() {
    tty->print_cr("terminate thread %p", this);
  }

  // By now, this thread should already be invisible to safepoint,
  // and its per-thread recorder also collected.
  assert(!is_safepoint_visible(), "wrong state");
#if INCLUDE_NMT
  assert(get_recorder() == NULL, "Already collected");
#endif // INCLUDE_NMT

  // JSR166 -- return the parker to the free list
  Parker::Release(_parker);
  _parker = NULL;
@ -3359,11 +3346,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
  // initialize TLS
  ThreadLocalStorage::init();

  // Bootstrap native memory tracking, so it can start recording memory
  // activities before worker thread is started. This is the first phase
  // of bootstrapping, VM is currently running in single-thread mode.
  MemTracker::bootstrap_single_thread();

  // Initialize output stream logging
  ostream_init_log();

@ -3414,9 +3396,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
  // Initialize Java-Level synchronization subsystem
  ObjectMonitor::Initialize();

  // Second phase of bootstrapping, VM is about entering multi-thread mode
  MemTracker::bootstrap_multi_thread();

  // Initialize global modules
  jint status = init_globals();
  if (status != JNI_OK) {
@ -3438,9 +3417,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
  // real raw monitor. VM is setup enough here for raw monitor enter.
  JvmtiExport::transition_pending_onload_raw_monitors();

  // Fully start NMT
  MemTracker::start();

  // Create the VMThread
  { TraceTime timer("Start VMThread", TraceStartupTime);
    VMThread::create();
@ -3995,8 +3971,6 @@ void Threads::add(JavaThread* p, bool force_daemon) {
    daemon = false;
  }

  p->set_safepoint_visible(true);

  ThreadService::add_thread(p, daemon);

  // Possible GC point.
@ -4042,13 +4016,6 @@ void Threads::remove(JavaThread* p) {
    // to do callbacks into the safepoint code. However, the safepoint code is not aware
    // of this thread since it is removed from the queue.
    p->set_terminated_value();

    // Now, this thread is not visible to safepoint
    p->set_safepoint_visible(false);
    // once the thread becomes safepoint invisible, we can not use its per-thread
    // recorder. And Threads::do_threads() no longer walks this thread, so we have
    // to release its per-thread recorder here.
    MemTracker::thread_exiting(p);
  } // unlock Threads_lock

  // Since Events::log uses a lock, we grab it outside the Threads_lock
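
Thread stacks are now recorded and released as plain (base, size) ranges, replacing the old per-thread MemRecorder protocol that the deleted code managed. A standalone model of the paired bookkeeping (invented names; the real accounting is inside the NMT virtual-memory tracker):

// Standalone model of paired thread-stack accounting (illustrative only).
#include <cstddef>
#include <map>

typedef unsigned char* address;

static std::map<address, size_t> g_stack_regions;   // base -> size
static size_t g_tracked_stack_bytes = 0;

static void record_thread_stack(address base, size_t size) {
  g_stack_regions[base] = size;
  g_tracked_stack_bytes += size;
}

static void release_thread_stack(address base, size_t size) {
  std::map<address, size_t>::iterator it = g_stack_regions.find(base);
  if (it != g_stack_regions.end() && it->second == size) {
    g_tracked_stack_bytes -= size;
    g_stack_regions.erase(it);        // record/release must stay paired
  }
}

int main() {
  address low = (address)0x40000000;  // the stack grows downward toward 'low'
  record_thread_stack(low, 512 * 1024);
  release_thread_stack(low, 512 * 1024);
  return g_tracked_stack_bytes == 0 ? 0 : 1;
}
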
@ -43,10 +43,6 @@
#include "runtime/unhandledOops.hpp"
#include "utilities/macros.hpp"

#if INCLUDE_NMT
#include "services/memRecorder.hpp"
#endif // INCLUDE_NMT

#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/exceptions.hpp"
@ -1036,16 +1032,6 @@ class JavaThread: public Thread {
  bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }

#if INCLUDE_NMT
  // native memory tracking
  inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; }
  inline void set_recorder(MemRecorder* rc) { _recorder = rc; }

 private:
  // per-thread memory recorder
  MemRecorder* volatile _recorder;
#endif // INCLUDE_NMT

  // Suspend/resume support for JavaThread
 private:
  inline void set_ext_suspended();
@ -1485,19 +1471,6 @@ public:
    return result;
  }

  // NMT (Native memory tracking) support.
  // This flag helps NMT to determine if this JavaThread will be blocked
  // at safepoint. If not, ThreadCritical is needed for writing memory records.
  // JavaThread is only safepoint visible when it is in Threads' thread list,
  // it is not visible until it is added to the list and becomes invisible
  // once it is removed from the list.
 public:
  bool is_safepoint_visible() const { return _safepoint_visible; }
  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
 private:
  bool _safepoint_visible;

  // Static operations
 public:
  // Returns the running thread as a JavaThread
  static inline JavaThread* current();