Jesper Wilhelmsson 2016-01-26 17:13:18 +01:00
commit e8e6b0e6f0
295 changed files with 11165 additions and 5856 deletions

View File

@@ -1,5 +1,5 @@
#
# Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -48,9 +48,9 @@ CC_COMPILER_REV := \
$(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
# Pick which compiler is validated
# Validated compiler for JDK9 is SS12.3 (5.12)
VALIDATED_COMPILER_REVS := 5.12
VALIDATED_CC_COMPILER_REVS := 5.12
# Validated compiler for JDK9 is SS12.4 (5.13)
VALIDATED_COMPILER_REVS := 5.13
VALIDATED_CC_COMPILER_REVS := 5.13
# Warning messages about not using the above validated versions
ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := $(strip ${VALIDATED_COMPILER_REVS})

View File

@@ -31,12 +31,6 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;

View File

@@ -1,261 +0,0 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
__ andr(esp, esp, -16);
__ mov(c_rarg3, esp);
// rmethod
// rlocals
// c_rarg3: first stack arg - wordSize
// adjust sp
__ sub(sp, c_rarg3, 18 * wordSize);
__ str(lr, Address(__ pre(sp, -2 * wordSize)));
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rmethod, rlocals, c_rarg3);
// r0: result handler
// Stack layout:
// rsp: return address <- sp
// 1 garbage
// 8 integer args (if static first is unused)
// 1 float/double identifiers
// 8 double args
// stack args <- esp
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Restore LR
__ ldr(lr, Address(__ post(sp, 2 * wordSize)));
// Do FP first so we can use c_rarg3 as temp
__ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
const FloatRegister r = as_FloatRegister(i);
Label d, done;
__ tbnz(c_rarg3, i, d);
__ ldrs(r, Address(sp, (10 + i) * wordSize));
__ b(done);
__ bind(d);
__ ldrd(r, Address(sp, (10 + i) * wordSize));
__ bind(done);
}
// c_rarg0 contains the result from the call of
// InterpreterRuntime::slow_signature_handler so we don't touch it
// here. It will be loaded with the JNIEnv* later.
__ ldr(c_rarg1, Address(sp, 1 * wordSize));
for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
Register rm = as_Register(i), rn = as_Register(i+1);
__ ldp(rm, rn, Address(sp, i * wordSize));
}
__ add(sp, sp, 18 * wordSize);
__ ret(lr);
return entry;
}
//
// Various method entries
//
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rmethod: Method*
// r13: sender sp
// esp: args
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack:
// [ arg ] <-- esp
// [ arg ]
// retaddr in lr
address entry_point = NULL;
Register continuation = lr;
switch (kind) {
case Interpreter::java_lang_math_abs:
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ fabsd(v0, v0);
__ mov(sp, r13); // Restore caller's SP
break;
case Interpreter::java_lang_math_sqrt:
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ fsqrtd(v0, v0);
__ mov(sp, r13);
break;
case Interpreter::java_lang_math_sin :
case Interpreter::java_lang_math_cos :
case Interpreter::java_lang_math_tan :
case Interpreter::java_lang_math_log :
case Interpreter::java_lang_math_log10 :
case Interpreter::java_lang_math_exp :
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ mov(sp, r13);
__ mov(r19, lr);
continuation = r19; // The first callee-saved register
generate_transcendental_entry(kind, 1);
break;
case Interpreter::java_lang_math_pow :
entry_point = __ pc();
__ mov(r19, lr);
continuation = r19;
__ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
__ ldrd(v1, Address(esp));
__ mov(sp, r13);
generate_transcendental_entry(kind, 2);
break;
default:
;
}
if (entry_point) {
__ br(continuation);
}
return entry_point;
}
// double trigonometrics and transcendentals
// static jdouble dsin(jdouble x);
// static jdouble dcos(jdouble x);
// static jdouble dtan(jdouble x);
// static jdouble dlog(jdouble x);
// static jdouble dlog10(jdouble x);
// static jdouble dexp(jdouble x);
// static jdouble dpow(jdouble x, jdouble y);
void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
address fn;
switch (kind) {
case Interpreter::java_lang_math_sin :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
break;
case Interpreter::java_lang_math_cos :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
break;
case Interpreter::java_lang_math_tan :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
break;
case Interpreter::java_lang_math_log :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
break;
case Interpreter::java_lang_math_log10 :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
break;
case Interpreter::java_lang_math_exp :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
break;
case Interpreter::java_lang_math_pow :
fpargs = 2;
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
break;
default:
ShouldNotReachHere();
fn = NULL; // unreachable
}
const int gpargs = 0, rtype = 3;
__ mov(rscratch1, fn);
__ blrt(rscratch1, gpargs, fpargs, rtype);
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// rmethod: Method*
// r13: sender SP
address entry_point = __ pc();
// abstract method entry
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}

View File

@@ -1090,7 +1090,7 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1272,14 +1272,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, as it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -57,6 +57,13 @@
#include "../../../../../../simulator/simulator.hpp"
#endif
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
#define __ _masm->
//-----------------------------------------------------------------------------
@@ -65,6 +72,213 @@ extern "C" void entry(CodeBuffer*);
//-----------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
__ andr(esp, esp, -16);
__ mov(c_rarg3, esp);
// rmethod
// rlocals
// c_rarg3: first stack arg - wordSize
// adjust sp
__ sub(sp, c_rarg3, 18 * wordSize);
__ str(lr, Address(__ pre(sp, -2 * wordSize)));
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rmethod, rlocals, c_rarg3);
// r0: result handler
// Stack layout:
// rsp: return address <- sp
// 1 garbage
// 8 integer args (if static first is unused)
// 1 float/double identifiers
// 8 double args
// stack args <- esp
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Restore LR
__ ldr(lr, Address(__ post(sp, 2 * wordSize)));
// Do FP first so we can use c_rarg3 as temp
__ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
const FloatRegister r = as_FloatRegister(i);
Label d, done;
__ tbnz(c_rarg3, i, d);
__ ldrs(r, Address(sp, (10 + i) * wordSize));
__ b(done);
__ bind(d);
__ ldrd(r, Address(sp, (10 + i) * wordSize));
__ bind(done);
}
// c_rarg0 contains the result from the call of
// InterpreterRuntime::slow_signature_handler so we don't touch it
// here. It will be loaded with the JNIEnv* later.
__ ldr(c_rarg1, Address(sp, 1 * wordSize));
for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
Register rm = as_Register(i), rn = as_Register(i+1);
__ ldp(rm, rn, Address(sp, i * wordSize));
}
__ add(sp, sp, 18 * wordSize);
__ ret(lr);
return entry;
}
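The float/double unpacking loop above relies on a single identifier word: bit i set means FP argument i is a double (the ldrd case), bit i clear means it is a float (the ldrs case). Below is a minimal standalone C++ sketch of that decision, assuming a little-endian slot layout; FpArg, kFpRegisterParameters and unpack_fp_args are illustrative names, not HotSpot types.

#include <cstdint>
#include <cstring>

constexpr int kFpRegisterParameters = 8;   // mirrors Argument::n_float_register_parameters_c here

struct FpArg {                             // illustrative type, not a HotSpot one
  bool   is_double;
  double as_double;                        // valid when is_double
  float  as_float;                         // valid otherwise
};

// identifiers: bit i set   -> slot i holds a double (the ldrd case)
//              bit i clear -> slot i holds a float  (the ldrs case)
// slots: the eight word-sized stack slots written by the handler.
void unpack_fp_args(uint64_t identifiers,
                    const uint64_t slots[kFpRegisterParameters],
                    FpArg out[kFpRegisterParameters]) {
  for (int i = 0; i < kFpRegisterParameters; i++) {
    out[i].is_double = ((identifiers >> i) & 1) != 0;               // same test as tbnz(c_rarg3, i, d)
    if (out[i].is_double) {
      std::memcpy(&out[i].as_double, &slots[i], sizeof(double));    // ldrd: whole 8-byte slot
    } else {
      std::memcpy(&out[i].as_float, &slots[i], sizeof(float));      // ldrs: low 4 bytes (little-endian)
    }
  }
}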
//
// Various method entries
//
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rmethod: Method*
// r13: sender sp
// esp: args
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack:
// [ arg ] <-- esp
// [ arg ]
// retaddr in lr
address entry_point = NULL;
Register continuation = lr;
switch (kind) {
case Interpreter::java_lang_math_abs:
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ fabsd(v0, v0);
__ mov(sp, r13); // Restore caller's SP
break;
case Interpreter::java_lang_math_sqrt:
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ fsqrtd(v0, v0);
__ mov(sp, r13);
break;
case Interpreter::java_lang_math_sin :
case Interpreter::java_lang_math_cos :
case Interpreter::java_lang_math_tan :
case Interpreter::java_lang_math_log :
case Interpreter::java_lang_math_log10 :
case Interpreter::java_lang_math_exp :
entry_point = __ pc();
__ ldrd(v0, Address(esp));
__ mov(sp, r13);
__ mov(r19, lr);
continuation = r19; // The first callee-saved register
generate_transcendental_entry(kind, 1);
break;
case Interpreter::java_lang_math_pow :
entry_point = __ pc();
__ mov(r19, lr);
continuation = r19;
__ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
__ ldrd(v1, Address(esp));
__ mov(sp, r13);
generate_transcendental_entry(kind, 2);
break;
default:
;
}
if (entry_point) {
__ br(continuation);
}
return entry_point;
}
// double trigonometrics and transcendentals
// static jdouble dsin(jdouble x);
// static jdouble dcos(jdouble x);
// static jdouble dtan(jdouble x);
// static jdouble dlog(jdouble x);
// static jdouble dlog10(jdouble x);
// static jdouble dexp(jdouble x);
// static jdouble dpow(jdouble x, jdouble y);
void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
address fn;
switch (kind) {
case Interpreter::java_lang_math_sin :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
break;
case Interpreter::java_lang_math_cos :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
break;
case Interpreter::java_lang_math_tan :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
break;
case Interpreter::java_lang_math_log :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
break;
case Interpreter::java_lang_math_log10 :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
break;
case Interpreter::java_lang_math_exp :
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
break;
case Interpreter::java_lang_math_pow :
fpargs = 2;
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
break;
default:
ShouldNotReachHere();
fn = NULL; // unreachable
}
const int gpargs = 0, rtype = 3;
__ mov(rscratch1, fn);
__ blrt(rscratch1, gpargs, fpargs, rtype);
}
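generate_transcendental_entry boils down to a fixed mapping from method kind to a one- or two-argument runtime function. A hedged C++ sketch of that mapping follows; the std:: math functions are only stand-ins for the SharedRuntime::d* routines (which need not be bit-identical to libm), and MethodKind is a local illustrative enum.

#include <cmath>

enum class MethodKind { math_sin, math_cos, math_tan, math_log, math_log10, math_exp, math_pow };

// One FP argument for everything except pow, which takes two (fpargs == 2 above).
double call_transcendental(MethodKind kind, double x, double y = 0.0) {
  switch (kind) {
    case MethodKind::math_sin:   return std::sin(x);     // stands in for SharedRuntime::dsin
    case MethodKind::math_cos:   return std::cos(x);     // stands in for SharedRuntime::dcos
    case MethodKind::math_tan:   return std::tan(x);     // stands in for SharedRuntime::dtan
    case MethodKind::math_log:   return std::log(x);     // stands in for SharedRuntime::dlog
    case MethodKind::math_log10: return std::log10(x);   // stands in for SharedRuntime::dlog10
    case MethodKind::math_exp:   return std::exp(x);     // stands in for SharedRuntime::dexp
    case MethodKind::math_pow:   return std::pow(x, y);  // stands in for SharedRuntime::dpow
  }
  return 0.0;                                            // unreachable for valid kinds (ShouldNotReachHere)
}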
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// rmethod: Method*
// r13: sender SP
address entry_point = __ pc();
// abstract method entry
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
address entry = __ pc();

View File

@@ -31,13 +31,6 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 230*K;
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
@@ -58,10 +51,19 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
bool AbstractInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
if (!InlineIntrinsics) return false;
return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
(kind==Interpreter::java_lang_math_abs));
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
return !TemplateInterpreter::math_entry_available(method_kind(m));
return !math_entry_available(method_kind(m));
}
// How much stack a method activation needs in stack slots.
@@ -159,15 +161,3 @@ void AbstractInterpreter::layout_activation(Method* method,
interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
}
}
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
bool TemplateInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
if (!InlineIntrinsics) return false;
return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
(kind==Interpreter::java_lang_math_abs));
}

View File

@@ -1,555 +0,0 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Slow_signature handler that respects the PPC C calling conventions.
//
// We get called by the native entry code with our output register
// area == 8. First we call InterpreterRuntime::get_result_handler
// to copy the pointer to the signature string temporarily to the
// first C-argument and to return the result_handler in
// R3_RET. Since native_entry will copy the jni-pointer to the
// first C-argument slot later on, it is OK to occupy this slot
* temporarily. Then we copy the argument list on the java
// expression stack into native varargs format on the native stack
// and load arguments into argument registers. Integer arguments in
// the varargs vector will be sign-extended to 8 bytes.
//
// On entry:
// R3_ARG1 - intptr_t* Address of java argument list in memory.
// R15_prev_state - BytecodeInterpreter* Address of interpreter state for
// this method
// R19_method
//
// On exit (just before return instruction):
// R3_RET - contains the address of the result_handler.
// R4_ARG2 - is not updated for static methods and contains "this" otherwise.
// R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
// ARGi contains this argument. Otherwise, ARGi is not updated.
// F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
const int LogSizeOfTwoInstructions = 3;
// FIXME: use Argument:: GL: Argument names different numbers!
const int max_fp_register_arguments = 13;
const int max_int_register_arguments = 6; // first 2 are reserved
const Register arg_java = R21_tmp1;
const Register arg_c = R22_tmp2;
const Register signature = R23_tmp3; // is string
const Register sig_byte = R24_tmp4;
const Register fpcnt = R25_tmp5;
const Register argcnt = R26_tmp6;
const Register intSlot = R27_tmp7;
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;
address entry = __ function_entry();
__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
__ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
__ mr(arg_java, R3_ARG1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
// Signature is in R3_RET. Signature is callee saved.
__ mr(signature, R3_RET);
// Get the result handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
{
Label L;
// test if static
// _access_flags._flags must be at offset 0.
// TODO PPC port: requires change in shared code.
//assert(in_bytes(AccessFlags::flags_offset()) == 0,
// "MethodDesc._access_flags == MethodDesc._access_flags._flags");
// _access_flags must be a 32 bit value.
assert(sizeof(AccessFlags) == 4, "wrong size");
__ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
// testbit with condition register.
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
__ btrue(CCR0, L);
// For non-static functions, pass "this" in R4_ARG2 and copy it
// to 2nd C-arg slot.
// We need to box the Java object here, so we use arg_java
// (address of current Java stack slot) as argument and don't
// dereference it as in case of ints, floats, etc.
__ mr(R4_ARG2, arg_java);
__ addi(arg_java, arg_java, -BytesPerWord);
__ std(R4_ARG2, _abi(carg_2), target_sp);
__ bind(L);
}
// Will be incremented directly after loop_start. argcnt=0
// corresponds to 3rd C argument.
__ li(argcnt, -1);
// arg_c points to 3rd C argument
__ addi(arg_c, target_sp, _abi(carg_3));
// no floating-point args parsed so far
__ li(fpcnt, 0);
Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
Label loop_start, loop_end;
Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
// signature points to '(' at entry
#ifdef ASSERT
__ lbz(sig_byte, 0, signature);
__ cmplwi(CCR0, sig_byte, '(');
__ bne(CCR0, do_dontreachhere);
#endif
__ bind(loop_start);
__ addi(argcnt, argcnt, 1);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
__ beq(CCR0, loop_end);
__ cmplwi(CCR0, sig_byte, 'B'); // byte
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'C'); // char
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'D'); // double
__ beq(CCR0, do_double);
__ cmplwi(CCR0, sig_byte, 'F'); // float
__ beq(CCR0, do_float);
__ cmplwi(CCR0, sig_byte, 'I'); // int
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'J'); // long
__ beq(CCR0, do_long);
__ cmplwi(CCR0, sig_byte, 'S'); // short
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'L'); // object
__ beq(CCR0, do_object);
__ cmplwi(CCR0, sig_byte, '['); // array
__ beq(CCR0, do_array);
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
// __ beq(CCR0, do_void);
__ bind(do_dontreachhere);
__ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
__ bind(do_array);
{
Label start_skip, end_skip;
__ bind(start_skip);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, '[');
__ beq(CCR0, start_skip); // skip further brackets
__ cmplwi(CCR0, sig_byte, '9');
__ bgt(CCR0, end_skip); // no optional size
__ cmplwi(CCR0, sig_byte, '0');
__ bge(CCR0, start_skip); // skip optional size
__ bind(end_skip);
__ cmplwi(CCR0, sig_byte, 'L');
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
__ b(do_boxed); // otherwise, go directly to do_boxed
}
__ bind(do_object);
{
Label L;
__ bind(L);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ';');
__ bne(CCR0, L);
}
// Need to box the Java object here, so we use arg_java (address of
// current Java stack slot) as argument and don't dereference it as
// in case of ints, floats, etc.
Label do_null;
__ bind(do_boxed);
__ ld(R0,0, arg_java);
__ cmpdi(CCR0, R0, 0);
__ li(intSlot,0);
__ beq(CCR0, do_null);
__ mr(intSlot, arg_java);
__ bind(do_null);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_int);
__ lwa(intSlot, 0, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_long);
__ ld(intSlot, -BytesPerWord, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_float);
__ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
// Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
// in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
__ stfs(floatSlot, 0, arg_c);
#else
__ stfs(floatSlot, 4, arg_c);
#endif
#elif defined(AIX)
// Although AIX runs on big endian CPU, float is in most significant
// word of an argument slot.
__ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
#endif
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(do_double);
__ lfd(floatSlot, - BytesPerWord, arg_java);
__ stfd(floatSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(loop_end);
__ pop_frame();
__ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
__ restore_LR_CR(R0);
__ blr();
Label move_int_arg, move_float_arg;
__ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ mr(R5_ARG3, intSlot); __ b(loop_start);
__ mr(R6_ARG4, intSlot); __ b(loop_start);
__ mr(R7_ARG5, intSlot); __ b(loop_start);
__ mr(R8_ARG6, intSlot); __ b(loop_start);
__ mr(R9_ARG7, intSlot); __ b(loop_start);
__ mr(R10_ARG8, intSlot); __ b(loop_start);
__ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ fmr(F1_ARG1, floatSlot); __ b(loop_start);
__ fmr(F2_ARG2, floatSlot); __ b(loop_start);
__ fmr(F3_ARG3, floatSlot); __ b(loop_start);
__ fmr(F4_ARG4, floatSlot); __ b(loop_start);
__ fmr(F5_ARG5, floatSlot); __ b(loop_start);
__ fmr(F6_ARG6, floatSlot); __ b(loop_start);
__ fmr(F7_ARG7, floatSlot); __ b(loop_start);
__ fmr(F8_ARG8, floatSlot); __ b(loop_start);
__ fmr(F9_ARG9, floatSlot); __ b(loop_start);
__ fmr(F10_ARG10, floatSlot); __ b(loop_start);
__ fmr(F11_ARG11, floatSlot); __ b(loop_start);
__ fmr(F12_ARG12, floatSlot); __ b(loop_start);
__ fmr(F13_ARG13, floatSlot); __ b(loop_start);
__ bind(move_intSlot_to_ARG);
__ sldi(R0, argcnt, LogSizeOfTwoInstructions);
__ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
__ bind(move_floatSlot_to_FARG);
__ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
__ addi(fpcnt, fpcnt, 1);
__ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
return entry;
}
address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
//
// Registers alive
// R3_RET
// LR
//
// Registers updated
// R3_RET
//
Label done;
address entry = __ pc();
switch (type) {
case T_BOOLEAN:
// convert !=0 to 1
__ neg(R0, R3_RET);
__ orr(R0, R3_RET, R0);
__ srwi(R3_RET, R0, 31);
break;
case T_BYTE:
// sign extend 8 bits
__ extsb(R3_RET, R3_RET);
break;
case T_CHAR:
// zero extend 16 bits
__ clrldi(R3_RET, R3_RET, 48);
break;
case T_SHORT:
// sign extend 16 bits
__ extsh(R3_RET, R3_RET);
break;
case T_INT:
// sign extend 32 bits
__ extsw(R3_RET, R3_RET);
break;
case T_LONG:
break;
case T_OBJECT:
// unbox result if not null
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, done);
__ ld(R3_RET, 0, R3_RET);
__ verify_oop(R3_RET);
break;
case T_FLOAT:
break;
case T_DOUBLE:
break;
case T_VOID:
break;
default: ShouldNotReachHere();
}
__ BIND(done);
__ blr();
return entry;
}
// Abstract method entry.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry = __ pc();
//
// Registers alive
// R16_thread - JavaThread*
// R19_method - callee's method (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
// Stack layout at this point:
//
// 0 [TOP_IJAVA_FRAME_ABI] <-- R1_SP
// alignment (optional)
// [outgoing Java arguments]
// ...
// PARENT [PARENT_IJAVA_FRAME_ABI]
// ...
//
// Can't use call_VM here because we have not set up a new
// interpreter state. Make the call to the vm and make it look like
// our caller set up the JavaFrameAnchor.
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
// Push a new C frame and save LR.
__ save_LR_CR(R0);
__ push_frame_reg_args(0, R11_scratch1);
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
R16_thread);
// Pop the C frame and restore LR.
__ pop_frame();
__ restore_LR_CR(R0);
// Reset JavaFrameAnchor from call_VM_leaf above.
__ reset_last_Java_frame();
// We don't know our caller, so jump to the general forward exception stub,
// which will also pop our full frame off. Satisfy the interface of
// SharedRuntime::generate_forward_exception()
__ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
__ mtctr(R11_scratch1);
__ bctr();
return entry;
}
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
// It contains a GC barrier which puts the reference into the SATB buffer
// to indicate that someone holds a strong reference to the object the
// weak ref points to!
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. the "intrinsified" code for G1 (or any SATB based GC),
// 2. the slow path - which is an expansion of the regular method entry.
//
// Notes:
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
if (UseG1GC) {
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
Label slow_path;
// Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
// In the G1 code we don't check if we need to reach a safepoint. We
// continue and the thread will safepoint at the next bytecode dispatch.
// If the receiver is null then it is OK to jump to the slow path.
__ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
// Check if receiver == NULL and go the slow path.
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, slow_path);
// Load the value of the referent field.
__ load_heap_oop(R3_RET, referent_offset, R3_RET);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer. Note with
// these parameters the pre-barrier does not generate
// the load of the previous value.
// Restore caller sp for c2i case.
#ifdef ASSERT
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x544);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ g1_write_barrier_pre(noreg, // obj
noreg, // offset
R3_RET, // pre_val
R11_scratch1, // tmp
R12_scratch2, // tmp
true); // needs_frame
__ blr();
// Generate regular method entry.
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
return entry;
}
return NULL;
}

View File

@@ -1477,7 +1477,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@@ -1489,9 +1489,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
VMRegPair* in_regs,
BasicType* in_sig_bt,
Register tmp_reg ) {
__ block_comment("check GC_locker::needs_gc");
__ block_comment("check GCLocker::needs_gc");
Label cont;
__ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
__ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
__ cmplwi(CCR0, tmp_reg, 0);
__ beq(CCR0, cont);
@@ -1690,14 +1690,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, as it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
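A rough, self-contained C++ sketch of the wrapper structure this comment outlines follows; every identifier below (stub::gclocker_needs_gc, CriticalArray, and so on) is an illustrative stand-in for the real VM machinery, not an actual HotSpot API.

namespace stub {
  // Every function and type here is a trivial stand-in so the sketch is
  // self-contained; none of these are real HotSpot interfaces.
  inline bool gclocker_needs_gc()       { return false; } // stands in for GCLocker::needs_gc()
  inline void block_for_jni_critical()  {}                // stands in for SharedRuntime::block_for_jni_critical()
  inline void transition_to_native()    {}                // thread_in_Java -> thread_in_native
  inline void transition_back_to_java() {}
  struct CriticalArray { void* elements; int length; };   // unpacked array argument
  inline CriticalArray unpack(void* java_array)  { return { java_array, 0 }; }
  inline void native_entry(CriticalArray)        {}       // the critical native itself
}

void critical_native_wrapper(void* java_array) {
  // Keep a new JNI critical region from starting while a GC is pending.
  if (stub::gclocker_needs_gc()) {
    stub::block_for_jni_critical();
  }
  stub::transition_to_native();
  stub::native_entry(stub::unpack(java_array));           // arrays passed as raw pointer + length
  stub::transition_back_to_java();
}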

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -51,6 +51,13 @@
#undef __
#define __ _masm->
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 230*K;
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
@@ -61,6 +68,500 @@
//-----------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// Slow_signature handler that respects the PPC C calling conventions.
//
// We get called by the native entry code with our output register
// area == 8. First we call InterpreterRuntime::get_result_handler
// to copy the pointer to the signature string temporarily to the
// first C-argument and to return the result_handler in
// R3_RET. Since native_entry will copy the jni-pointer to the
// first C-argument slot later on, it is OK to occupy this slot
// temporarily. Then we copy the argument list on the java
// expression stack into native varargs format on the native stack
// and load arguments into argument registers. Integer arguments in
// the varargs vector will be sign-extended to 8 bytes.
//
// On entry:
// R3_ARG1 - intptr_t* Address of java argument list in memory.
// R15_prev_state - BytecodeInterpreter* Address of interpreter state for
// this method
// R19_method
//
// On exit (just before return instruction):
// R3_RET - contains the address of the result_handler.
// R4_ARG2 - is not updated for static methods and contains "this" otherwise.
// R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
// ARGi contains this argument. Otherwise, ARGi is not updated.
// F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
const int LogSizeOfTwoInstructions = 3;
// FIXME: use Argument:: GL: Argument names different numbers!
const int max_fp_register_arguments = 13;
const int max_int_register_arguments = 6; // first 2 are reserved
const Register arg_java = R21_tmp1;
const Register arg_c = R22_tmp2;
const Register signature = R23_tmp3; // is string
const Register sig_byte = R24_tmp4;
const Register fpcnt = R25_tmp5;
const Register argcnt = R26_tmp6;
const Register intSlot = R27_tmp7;
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;
address entry = __ function_entry();
__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
__ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
__ mr(arg_java, R3_ARG1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
// Signature is in R3_RET. Signature is callee saved.
__ mr(signature, R3_RET);
// Get the result handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
{
Label L;
// test if static
// _access_flags._flags must be at offset 0.
// TODO PPC port: requires change in shared code.
//assert(in_bytes(AccessFlags::flags_offset()) == 0,
// "MethodDesc._access_flags == MethodDesc._access_flags._flags");
// _access_flags must be a 32 bit value.
assert(sizeof(AccessFlags) == 4, "wrong size");
__ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
// testbit with condition register.
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
__ btrue(CCR0, L);
// For non-static functions, pass "this" in R4_ARG2 and copy it
// to 2nd C-arg slot.
// We need to box the Java object here, so we use arg_java
// (address of current Java stack slot) as argument and don't
// dereference it as in case of ints, floats, etc.
__ mr(R4_ARG2, arg_java);
__ addi(arg_java, arg_java, -BytesPerWord);
__ std(R4_ARG2, _abi(carg_2), target_sp);
__ bind(L);
}
// Will be incremented directly after loop_start. argcnt=0
// corresponds to 3rd C argument.
__ li(argcnt, -1);
// arg_c points to 3rd C argument
__ addi(arg_c, target_sp, _abi(carg_3));
// no floating-point args parsed so far
__ li(fpcnt, 0);
Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
Label loop_start, loop_end;
Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
// signature points to '(' at entry
#ifdef ASSERT
__ lbz(sig_byte, 0, signature);
__ cmplwi(CCR0, sig_byte, '(');
__ bne(CCR0, do_dontreachhere);
#endif
__ bind(loop_start);
__ addi(argcnt, argcnt, 1);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
__ beq(CCR0, loop_end);
__ cmplwi(CCR0, sig_byte, 'B'); // byte
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'C'); // char
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'D'); // double
__ beq(CCR0, do_double);
__ cmplwi(CCR0, sig_byte, 'F'); // float
__ beq(CCR0, do_float);
__ cmplwi(CCR0, sig_byte, 'I'); // int
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'J'); // long
__ beq(CCR0, do_long);
__ cmplwi(CCR0, sig_byte, 'S'); // short
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'L'); // object
__ beq(CCR0, do_object);
__ cmplwi(CCR0, sig_byte, '['); // array
__ beq(CCR0, do_array);
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
// __ beq(CCR0, do_void);
__ bind(do_dontreachhere);
__ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
__ bind(do_array);
{
Label start_skip, end_skip;
__ bind(start_skip);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, '[');
__ beq(CCR0, start_skip); // skip further brackets
__ cmplwi(CCR0, sig_byte, '9');
__ bgt(CCR0, end_skip); // no optional size
__ cmplwi(CCR0, sig_byte, '0');
__ bge(CCR0, start_skip); // skip optional size
__ bind(end_skip);
__ cmplwi(CCR0, sig_byte, 'L');
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
__ b(do_boxed); // otherwise, go directly to do_boxed
}
__ bind(do_object);
{
Label L;
__ bind(L);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ';');
__ bne(CCR0, L);
}
// Need to box the Java object here, so we use arg_java (address of
// current Java stack slot) as argument and don't dereference it as
// in case of ints, floats, etc.
Label do_null;
__ bind(do_boxed);
__ ld(R0,0, arg_java);
__ cmpdi(CCR0, R0, 0);
__ li(intSlot,0);
__ beq(CCR0, do_null);
__ mr(intSlot, arg_java);
__ bind(do_null);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_int);
__ lwa(intSlot, 0, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_long);
__ ld(intSlot, -BytesPerWord, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_float);
__ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
// Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
// in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
__ stfs(floatSlot, 0, arg_c);
#else
__ stfs(floatSlot, 4, arg_c);
#endif
#elif defined(AIX)
// Although AIX runs on big endian CPU, float is in most significant
// word of an argument slot.
__ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
#endif
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(do_double);
__ lfd(floatSlot, - BytesPerWord, arg_java);
__ stfd(floatSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(loop_end);
__ pop_frame();
__ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
__ restore_LR_CR(R0);
__ blr();
Label move_int_arg, move_float_arg;
__ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ mr(R5_ARG3, intSlot); __ b(loop_start);
__ mr(R6_ARG4, intSlot); __ b(loop_start);
__ mr(R7_ARG5, intSlot); __ b(loop_start);
__ mr(R8_ARG6, intSlot); __ b(loop_start);
__ mr(R9_ARG7, intSlot); __ b(loop_start);
__ mr(R10_ARG8, intSlot); __ b(loop_start);
__ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ fmr(F1_ARG1, floatSlot); __ b(loop_start);
__ fmr(F2_ARG2, floatSlot); __ b(loop_start);
__ fmr(F3_ARG3, floatSlot); __ b(loop_start);
__ fmr(F4_ARG4, floatSlot); __ b(loop_start);
__ fmr(F5_ARG5, floatSlot); __ b(loop_start);
__ fmr(F6_ARG6, floatSlot); __ b(loop_start);
__ fmr(F7_ARG7, floatSlot); __ b(loop_start);
__ fmr(F8_ARG8, floatSlot); __ b(loop_start);
__ fmr(F9_ARG9, floatSlot); __ b(loop_start);
__ fmr(F10_ARG10, floatSlot); __ b(loop_start);
__ fmr(F11_ARG11, floatSlot); __ b(loop_start);
__ fmr(F12_ARG12, floatSlot); __ b(loop_start);
__ fmr(F13_ARG13, floatSlot); __ b(loop_start);
__ bind(move_intSlot_to_ARG);
__ sldi(R0, argcnt, LogSizeOfTwoInstructions);
__ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
__ bind(move_floatSlot_to_FARG);
__ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
__ addi(fpcnt, fpcnt, 1);
__ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
return entry;
}
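The signature walk above (loop_start through do_array) classifies every Java argument byte by byte. Here is a standalone C++ sketch of the same classification, assuming a well-formed method signature and using plain library types instead of the assembler loop; Slot and classify_args are illustrative names.

#include <cassert>
#include <string>
#include <vector>

enum class Slot { Int, Long, Float, Double, Object };    // where the argument will be routed

std::vector<Slot> classify_args(const std::string& sig) {
  std::vector<Slot> out;
  assert(!sig.empty() && sig[0] == '(');                 // "signature points to '(' at entry"
  std::size_t i = 1;
  while (sig[i] != ')') {                                // loop_end
    switch (sig[i]) {
      case 'B': case 'C': case 'I': case 'S': case 'Z':  // byte, char, int, short, boolean
        out.push_back(Slot::Int);    i++; break;         // do_int
      case 'J': out.push_back(Slot::Long);   i++; break; // do_long
      case 'F': out.push_back(Slot::Float);  i++; break; // do_float
      case 'D': out.push_back(Slot::Double); i++; break; // do_double
      case 'L':                                          // do_object: skip the class name
        while (sig[i] != ';') i++;
        i++;
        out.push_back(Slot::Object);
        break;
      case '[':                                          // do_array: skip brackets and optional size
        while (sig[i] == '[' || (sig[i] >= '0' && sig[i] <= '9')) i++;
        if (sig[i] == 'L') { while (sig[i] != ';') i++; }
        i++;
        out.push_back(Slot::Object);                     // arrays are boxed, like objects (do_boxed)
        break;
      default:
        assert(false && "unexpected signature byte");    // do_dontreachhere
        return out;
    }
  }
  return out;
}

For example, classify_args("(I[BLjava/lang/String;D)V") yields Int, Object, Object, Double, which is how the branches above would route those four arguments.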
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
//
// Registers alive
// R3_RET
// LR
//
// Registers updated
// R3_RET
//
Label done;
address entry = __ pc();
switch (type) {
case T_BOOLEAN:
// convert !=0 to 1
__ neg(R0, R3_RET);
__ orr(R0, R3_RET, R0);
__ srwi(R3_RET, R0, 31);
break;
case T_BYTE:
// sign extend 8 bits
__ extsb(R3_RET, R3_RET);
break;
case T_CHAR:
// zero extend 16 bits
__ clrldi(R3_RET, R3_RET, 48);
break;
case T_SHORT:
// sign extend 16 bits
__ extsh(R3_RET, R3_RET);
break;
case T_INT:
// sign extend 32 bits
__ extsw(R3_RET, R3_RET);
break;
case T_LONG:
break;
case T_OBJECT:
// unbox result if not null
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, done);
__ ld(R3_RET, 0, R3_RET);
__ verify_oop(R3_RET);
break;
case T_FLOAT:
break;
case T_DOUBLE:
break;
case T_VOID:
break;
default: ShouldNotReachHere();
}
BIND(done);
__ blr();
return entry;
}
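The result handler applies a small per-type normalization to the raw native return value. A minimal C++ sketch of those conversions is shown below; floats and doubles are omitted because they come back in FP registers and pass through untouched, and BasicType/normalize_result are illustrative local names rather than the real VM declarations.

#include <cstdint>

enum class BasicType { Boolean, Byte, Char, Short, Int, Long, Object, Void };  // local stand-in

intptr_t normalize_result(BasicType type, intptr_t raw) {
  switch (type) {
    case BasicType::Boolean: return raw != 0 ? 1 : 0;              // convert !=0 to 1
    case BasicType::Byte:    return (int8_t)raw;                   // sign extend 8 bits
    case BasicType::Char:    return (uint16_t)raw;                 // zero extend 16 bits
    case BasicType::Short:   return (int16_t)raw;                  // sign extend 16 bits
    case BasicType::Int:     return (int32_t)raw;                  // sign extend 32 bits
    case BasicType::Long:    return raw;                           // already full width
    case BasicType::Object:                                        // unbox (dereference the handle) if not null
      return raw == 0 ? 0 : *reinterpret_cast<intptr_t*>(raw);
    case BasicType::Void:    return raw;
  }
  return raw;
}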
// Abstract method entry.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry = __ pc();
//
// Registers alive
// R16_thread - JavaThread*
// R19_method - callee's method (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
// Stack layout at this point:
//
// 0 [TOP_IJAVA_FRAME_ABI] <-- R1_SP
// alignment (optional)
// [outgoing Java arguments]
// ...
// PARENT [PARENT_IJAVA_FRAME_ABI]
// ...
//
// Can't use call_VM here because we have not set up a new
// interpreter state. Make the call to the vm and make it look like
// our caller set up the JavaFrameAnchor.
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
// Push a new C frame and save LR.
__ save_LR_CR(R0);
__ push_frame_reg_args(0, R11_scratch1);
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
R16_thread);
// Pop the C frame and restore LR.
__ pop_frame();
__ restore_LR_CR(R0);
// Reset JavaFrameAnchor from call_VM_leaf above.
__ reset_last_Java_frame();
// We don't know our caller, so jump to the general forward exception stub,
// which will also pop our full frame off. Satisfy the interface of
// SharedRuntime::generate_forward_exception()
__ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
__ mtctr(R11_scratch1);
__ bctr();
return entry;
}
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
// It contains a GC barrier which puts the reference into the SATB buffer
// to indicate that someone holds a strong reference to the object the
// weak ref points to!
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. the "intrinsified" code for G1 (or any SATB based GC),
// 2. the slow path - which is an expansion of the regular method entry.
//
// Notes:
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
if (UseG1GC) {
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
Label slow_path;
// Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
// In the G1 code we don't check if we need to reach a safepoint. We
// continue and the thread will safepoint at the next bytecode dispatch.
// If the receiver is null then it is OK to jump to the slow path.
__ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
// Check if receiver == NULL and go the slow path.
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, slow_path);
// Load the value of the referent field.
__ load_heap_oop(R3_RET, referent_offset, R3_RET);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer. Note with
// these parameters the pre-barrier does not generate
// the load of the previous value.
// Restore caller sp for c2i case.
#ifdef ASSERT
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x544);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ g1_write_barrier_pre(noreg, // obj
noreg, // offset
R3_RET, // pre_val
R11_scratch1, // tmp
R12_scratch2, // tmp
true); // needs_frame
__ blr();
// Generate regular method entry.
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
return entry;
}
return NULL;
}
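A short pseudo-C++ sketch of the G1 fast path generated above: null-check the receiver, load the referent, and log it through the SATB pre-barrier before returning. The types and the barrier call below are stand-ins, not the real VM interfaces.

struct oop_t { void* raw; };                         // illustrative stand-in for a Java object reference

// Stand-in for g1_write_barrier_pre(pre_val = value): would enqueue the value
// into the thread's SATB buffer so the concurrent marker keeps it alive.
inline void g1_satb_pre_barrier_log(oop_t /*value*/) {}

// Stand-in for load_heap_oop(R3_RET, referent_offset, R3_RET).
inline oop_t load_referent(oop_t reference, int referent_offset) {
  return { *reinterpret_cast<void**>(static_cast<char*>(reference.raw) + referent_offset) };
}

// Returns true and fills *result on the fast path; false means "take the
// regular zerolocals method entry" (the slow_path branch above).
bool reference_get_fast_path(oop_t receiver, int referent_offset, oop_t* result) {
  if (receiver.raw == nullptr) {
    return false;                                    // slow path raises the NPE the normal way
  }
  oop_t referent = load_referent(receiver, referent_offset);
  g1_satb_pre_barrier_log(referent);                 // pre-barrier sees the referent as pre_val
  *result = referent;
  return true;
}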
// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
address entry = __ pc();
@@ -222,12 +723,6 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
return entry;
}
// A result handler converts the native result into java format.
// Use the shared code between c++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
return AbstractInterpreterGenerator::generate_result_handler_for(type);
}
address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
address entry = __ pc();
@@ -602,7 +1097,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
// End of helpers
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
if (!TemplateInterpreter::math_entry_available(kind)) {
if (!Interpreter::math_entry_available(kind)) {
NOT_PRODUCT(__ should_not_reach_here();)
return NULL;
}

View File

@@ -31,18 +31,6 @@
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef _LP64
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;
#else
int TemplateInterpreter::InterpreterCodeSize = 230 * K;
#endif
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;

View File

@@ -1,231 +0,0 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.
#define __ _masm->
//----------------------------------------------------------------------------------------------------
#ifndef _LP64
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
}
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
enum {
non_float = 0,
float_sig = 1,
double_sig = 2,
sig_mask = 3
};
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
Address Sig = argv.address_in_frame(); // Argument 0 holds the signature
__ ld_ptr( Sig, G3_scratch ); // Get register argument signature word into G3_scratch
__ mov( G3_scratch, G4_scratch);
__ srl( G4_scratch, 2, G4_scratch); // Skip Arg 0
Label done;
for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
Label NonFloatArg;
Label LoadFloatArg;
Label LoadDoubleArg;
Label NextArg;
Address a = ldarg.address_in_frame();
__ andcc(G4_scratch, sig_mask, G3_scratch);
__ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
__ delayed()->nop();
__ cmp(G3_scratch, float_sig );
__ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
__ delayed()->nop();
__ cmp(G3_scratch, double_sig );
__ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
__ delayed()->nop();
__ bind(NonFloatArg);
// There are only 6 integer register arguments!
if ( ldarg.is_register() )
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
else {
// Optimization, see if there are any more args and get out prior to checking
// all 16 float registers. My guess is that this is rare.
// If is_register is false, then we are done with the first six integer args.
__ br_null_short(G4_scratch, Assembler::pt, done);
}
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(LoadFloatArg);
__ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(LoadDoubleArg);
__ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(NextArg);
}
__ bind(done);
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#endif
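The LP64 handler above relies on a packed signature word: two bits per argument (non_float = 0, float_sig = 1, double_sig = 2), stored in the first argument slot and consumed low bits first, shifting right by two per argument. Below is a minimal host-side C++ sketch of that decoding, not HotSpot code; the function name and printed register names are illustrative only, and the word is assumed to have already been shifted past the Arg 0 slot, as the assembly does before its loop.

#include <cstdint>
#include <cstdio>

enum { non_float = 0, float_sig = 1, double_sig = 2, sig_mask = 3 };

// Decode a 2-bit-per-argument signature word, lowest bits first, mirroring
// the andcc/cmp/srl sequence in generate_slow_signature_handler above.
static void decode_signature_word(uint32_t sig_word, int num_args) {
  for (int i = 0; i < num_args; i++) {
    switch (sig_word & sig_mask) {
      case float_sig:  printf("arg %d: single-precision float register\n", i); break;
      case double_sig: printf("arg %d: double-precision float register\n", i); break;
      default:         printf("arg %d: integer register (or stack)\n", i);     break;
    }
    sig_word >>= 2;  // advance to the next argument's two-bit tag
  }
}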
void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// Generate code to initiate compilation on the counter overflow.
// InterpreterRuntime::frequency_counter_overflow takes two arguments,
// the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
// and the second is only used when the first is true. We pass zero for both.
// The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out).
__ set((int)false, O2);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
// returns verified_entry_point or NULL
// we ignore it in any case
__ ba_short(Lcontinue);
}
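A conceptual C++ sketch, not HotSpot code, of the runtime call made above: the method-entry overflow path passes a NULL branch bcp, so the runtime treats it as an invocation-counter overflow rather than a backedge overflow, and whatever verified entry point comes back is ignored by this stub. request_compilation below is a hypothetical stand-in for the compile broker.

#include <cstddef>

typedef unsigned char* address;

// Hypothetical stand-in for the compile request; the real VM goes through
// the CompileBroker and may return NULL if the compilation went to the
// background or bailed out.
static address request_compilation(bool is_backedge) {
  (void) is_backedge;
  return NULL;
}

// Shape of the frequency_counter_overflow call as used above:
// branch_bcp == NULL means the overflow happened at method entry.
static address frequency_counter_overflow(address branch_bcp) {
  bool is_backedge = (branch_bcp != NULL);
  return request_compilation(is_backedge);  // the caller above ignores the result
}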
// End of helpers
// Various method entries
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry = __ pc();
// abstract method entry
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3386,10 +3386,20 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
cmp(t1, t2);
brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
// increment waste limit to prevent getting stuck on this slow path
delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
} else {
delayed()->nop();
// set64 does not use the temp register if the given constant is 32 bit. So
// we can just use any register; using G0 results in ignoring of the upper 32 bit
// of that value.
set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
add(t2, t3, t2);
}
st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
if (TLABStats) {
// increment number of slow_allocations
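The branch added above is needed because a SPARC add instruction can only encode a signed 13-bit immediate; a larger waste-limit increment has to be materialized into a register first (set64 + add). A one-line host-side sketch of the range check that Assembler::is_simm13 performs, for illustration only:

#include <cstdint>

// A signed 13-bit immediate covers [-4096, 4095]; anything outside that range
// needs a scratch register on SPARC, which is what the else branch above handles.
static bool fits_simm13(int64_t value) {
  return value >= -4096 && value <= 4095;
}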

View File

@ -1748,7 +1748,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@ -1759,9 +1759,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
__ block_comment("check GCLocker::needs_gc");
Label cont;
AddressLiteral sync_state(GC_locker::needs_gc_address());
AddressLiteral sync_state(GCLocker::needs_gc_address());
__ load_bool_contents(sync_state, G3_scratch);
__ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
__ delayed()->nop();
@ -1936,14 +1936,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
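A pseudo-C++ sketch of the wrapper structure described in the comment above. The names below (gclocker_needs_gc, block_for_jni_critical, and so on) are illustrative stand-ins for the real VM entry points, not the generated code itself.

static bool gclocker_needs_gc = false;           // stands in for GCLocker::needs_gc()
static void block_for_jni_critical() {}          // stands in for SharedRuntime::block_for_jni_critical()
static void transition_to_thread_in_native() {}  // thread state change done by the wrapper
static void call_native_entry(void* unpacked_args) { (void) unpacked_args; }

// Rough control flow of a critical-native wrapper: hold off if a GC has been
// requested, then transition and call the native entry point directly.
static void critical_native_wrapper(void* packed_args) {
  if (gclocker_needs_gc) {
    block_for_jni_critical();
  }
  transition_to_thread_in_native();
  call_native_entry(packed_args);  // the real wrapper unpacks array arguments here
}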

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,6 +52,18 @@
#endif
#undef FAST_DISPATCH
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef _LP64
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;
#else
int TemplateInterpreter::InterpreterCodeSize = 230 * K;
#endif
// Generation of Interpreter
//
@ -63,6 +75,174 @@
//----------------------------------------------------------------------------------------------------
#ifndef _LP64
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
}
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
enum {
non_float = 0,
float_sig = 1,
double_sig = 2,
sig_mask = 3
};
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
Address Sig = argv.address_in_frame(); // Argument 0 holds the signature
__ ld_ptr( Sig, G3_scratch ); // Get register argument signature word into G3_scratch
__ mov( G3_scratch, G4_scratch);
__ srl( G4_scratch, 2, G4_scratch); // Skip Arg 0
Label done;
for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
Label NonFloatArg;
Label LoadFloatArg;
Label LoadDoubleArg;
Label NextArg;
Address a = ldarg.address_in_frame();
__ andcc(G4_scratch, sig_mask, G3_scratch);
__ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
__ delayed()->nop();
__ cmp(G3_scratch, float_sig );
__ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
__ delayed()->nop();
__ cmp(G3_scratch, double_sig );
__ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
__ delayed()->nop();
__ bind(NonFloatArg);
// There are only 6 integer register arguments!
if ( ldarg.is_register() )
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
else {
// Optimization, see if there are any more args and get out prior to checking
// all 16 float registers. My guess is that this is rare.
// If is_register is false, then we are done with the first six integer args.
__ br_null_short(G4_scratch, Assembler::pt, done);
}
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(LoadFloatArg);
__ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(LoadDoubleArg);
__ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
__ ba(NextArg);
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(NextArg);
}
__ bind(done);
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#endif
void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// Generate code to initiate compilation on the counter overflow.
// InterpreterRuntime::frequency_counter_overflow takes two arguments,
// the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
// and the second is only used when the first is true. We pass zero for both.
// The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out).
__ set((int)false, O2);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
// returns verified_entry_point or NULL
// we ignore it in any case
__ ba_short(Lcontinue);
}
// End of helpers
// Various method entries
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry = __ pc();
// abstract method entry
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry;
}
void TemplateInterpreterGenerator::save_native_result(void) {
// result potentially in O0/O1: save it across calls
@ -911,6 +1091,31 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
return NULL;
}
// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Quick & dirty stack overflow checking: bang the stack & handle trap.
// Note that we do the banging after the frame is setup, since the exception
// handling code expects to find a valid interpreter frame on the stack.
// Doing the banging earlier fails if the caller frame is not an interpreter
// frame.
// (Also, the exception throwing code expects to unlock any synchronized
// method receiver, so do the banging after locking the receiver.)
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
const int page_size = os::vm_page_size();
const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
const int start_page = native_call ? n_shadow_pages : 1;
for (int pages = start_page; pages <= n_shadow_pages; pages++) {
__ bang_stack_with_offset(pages*page_size);
}
}
}
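The loop above touches one word per page so that every page of the shadow zone is mapped and any guard-page trap fires while the frame is still in a known state. A small host-side illustration of the same page-stride idea with made-up sizes; it only demonstrates the stride, not the real trap handling, and must be given a writable buffer at least region_size bytes long whose high end is base.

#include <cstddef>

// Touch one byte per page, working downwards from base, mirroring the
// bang_stack_with_offset(pages * page_size) loop above (stacks grow down).
static void bang_pages(volatile char* base, size_t region_size, size_t page_size) {
  for (size_t offset = page_size; offset <= region_size; offset += page_size) {
    base[-(ptrdiff_t) offset] = 0;
  }
}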
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3356,7 +3356,15 @@ void TemplateTable::_new() {
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
// increment waste limit to prevent getting stuck on this slow path
__ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
__ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
} else {
// set64 does not use the temp register if the given constant is 32 bit. So
// we can just use any register; using G0 results in ignoring of the upper 32 bit
// of that value.
__ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
__ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
}
__ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
} else {
// No allocation in the shared eden.

View File

@ -27,16 +27,6 @@
#include "interpreter/interpreter.hpp"
#include "runtime/frame.inline.hpp"
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,

View File

@ -1,53 +0,0 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#define __ _masm->
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}

View File

@ -1,190 +0,0 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
//------------------------------------------------------------------------------------------------------------------------
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx,: method
// rcx: temporary
// rdi: pointer to locals
// rsp: end of copied parameters area
__ mov(rcx, rsp);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
__ ret(0);
return entry;
}
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
// rcx: scratch
// rsi: sender sp
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
address entry_point = __ pc();
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack: [ ret adr ] <-- rsp
// [ lo(arg) ]
// [ hi(arg) ]
//
// Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
// native methods. Interpreter::method_kind(...) does a check for
// native methods first before checking for intrinsic methods and
// thus will never select this entry point. Make sure it is not
// called accidentally since the SharedRuntime entry points will
// not work for JDK 1.2.
//
// We no longer need to check for JDK 1.2 since it's EOL'ed.
// The following check existed in pre 1.6 implementation,
// if (Universe::is_jdk12x_version()) {
// __ should_not_reach_here();
// }
// Universe::is_jdk12x_version() always returns false since
// the JDK version is not yet determined when this method is called.
// This method is called during interpreter_init() whereas
// JDK version is only determined when universe2_init() is called.
// Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
// java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3.
// get argument
__ fld_d(Address(rsp, 1*wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
break;
case Interpreter::java_lang_math_cos :
__ trigfunc('c');
break;
case Interpreter::java_lang_math_tan :
__ trigfunc('t');
break;
case Interpreter::java_lang_math_sqrt:
__ fsqrt();
break;
case Interpreter::java_lang_math_abs:
__ fabs();
break;
case Interpreter::java_lang_math_log:
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
}
else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)));
}
__ addptr(rsp, 2 * wordSize);
break;
case Interpreter::java_lang_math_log10:
__ flog10();
// Store to stack to convert 80bit precision back to 64bits
__ push_fTOS();
__ pop_fTOS();
break;
case Interpreter::java_lang_math_pow:
__ fld_d(Address(rsp, 3*wordSize)); // second argument
__ subptr(rsp, 4 * wordSize);
__ fstp_d(Address(rsp, 0));
__ fstp_d(Address(rsp, 2 * wordSize));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dpow)));
}
__ addptr(rsp, 4 * wordSize);
break;
case Interpreter::java_lang_math_exp:
__ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)));
}
__ addptr(rsp, 2*wordSize);
break;
default :
ShouldNotReachHere();
}
// return double result in xmm0 for interpreter and compilers.
if (UseSSE >= 2) {
__ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
// done, result in FPU ST(0) or XMM0
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
return entry_point;
}
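The push_fTOS()/pop_fTOS() pair in the log10 case above exists because x87 keeps results in 80-bit extended precision; storing to a 64-bit stack slot and reloading rounds the value to double precision, matching what compiled code would produce. A tiny standalone illustration of that rounding, assuming an x86 target where long double is the 80-bit x87 format:

#include <cstdio>

int main() {
  long double extended = 1.0L / 3.0L;  // typically kept in 80-bit extended precision
  double rounded = (double) extended;  // the store/reload rounds to 64-bit precision
  printf("extended: %.21Lg\n", extended);
  printf("rounded:  %.21g\n", rounded);
  return 0;
}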

View File

@ -1,298 +0,0 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
#ifdef _WIN64
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
__ mov(c_rarg3, rsp);
// adjust rsp
__ subptr(rsp, 4 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rbx, r14, c_rarg3);
// rax: result handler
// Stack layout:
// rsp: 3 integer or float args (if static first is unused)
// 1 float/double identifiers
// return address
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Do FP first so we can use c_rarg3 as temp
__ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers
for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
XMMRegister floatreg = as_XMMRegister(i+1);
Label isfloatordouble, isdouble, next;
__ testl(c_rarg3, 1 << (i*2)); // Float or Double?
__ jcc(Assembler::notZero, isfloatordouble);
// Do Int register here
switch ( i ) {
case 0:
__ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
__ testl(rscratch1, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
break;
case 1:
__ movptr(c_rarg2, Address(rsp, wordSize));
break;
case 2:
__ movptr(c_rarg3, Address(rsp, 2 * wordSize));
break;
default:
break;
}
__ jmp (next);
__ bind(isfloatordouble);
__ testl(c_rarg3, 1 << ((i*2)+1)); // Double?
__ jcc(Assembler::notZero, isdouble);
// Do Float Here
__ movflt(floatreg, Address(rsp, i * wordSize));
__ jmp(next);
// Do Double here
__ bind(isdouble);
__ movdbl(floatreg, Address(rsp, i * wordSize));
__ bind(next);
}
// restore rsp
__ addptr(rsp, 4 * wordSize);
__ ret(0);
return entry;
}
#else
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
__ mov(c_rarg3, rsp);
// adjust rsp
__ subptr(rsp, 14 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rbx, r14, c_rarg3);
// rax: result handler
// Stack layout:
// rsp: 5 integer args (if static first is unused)
// 1 float/double identifiers
// 8 double args
// return address
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Do FP first so we can use c_rarg3 as temp
__ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers
for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
const XMMRegister r = as_XMMRegister(i);
Label d, done;
__ testl(c_rarg3, 1 << i);
__ jcc(Assembler::notZero, d);
__ movflt(r, Address(rsp, (6 + i) * wordSize));
__ jmp(done);
__ bind(d);
__ movdbl(r, Address(rsp, (6 + i) * wordSize));
__ bind(done);
}
// Now handle integrals. Only do c_rarg1 if not static.
__ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
__ testl(c_rarg3, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
__ movptr(c_rarg2, Address(rsp, wordSize));
__ movptr(c_rarg3, Address(rsp, 2 * wordSize));
__ movptr(c_rarg4, Address(rsp, 3 * wordSize));
__ movptr(c_rarg5, Address(rsp, 4 * wordSize));
// restore rsp
__ addptr(rsp, 14 * wordSize);
__ ret(0);
return entry;
}
#endif
//
// Various method entries
//
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
// rcx: scratch
// r13: sender sp
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
address entry_point = __ pc();
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack: [ ret adr ] <-- rsp
// [ lo(arg) ]
// [ hi(arg) ]
//
// Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
// native methods. Interpreter::method_kind(...) does a check for
// native methods first before checking for intrinsic methods and
// thus will never select this entry point. Make sure it is not
// called accidentally since the SharedRuntime entry points will
// not work for JDK 1.2.
//
// We no longer need to check for JDK 1.2 since it's EOL'ed.
// The following check existed in pre 1.6 implementation,
// if (Universe::is_jdk12x_version()) {
// __ should_not_reach_here();
// }
// Universe::is_jdk12x_version() always returns false since
// the JDK version is not yet determined when this method is called.
// This method is called during interpreter_init() whereas
// JDK version is only determined when universe2_init() is called.
// Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
// java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3.
// get argument
if (kind == Interpreter::java_lang_math_sqrt) {
__ sqrtsd(xmm0, Address(rsp, wordSize));
} else if (kind == Interpreter::java_lang_math_exp) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else if (kind == Interpreter::java_lang_math_log) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else if (kind == Interpreter::java_lang_math_pow) {
__ movdbl(xmm1, Address(rsp, wordSize));
__ movdbl(xmm0, Address(rsp, 3 * wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ fld_d(Address(rsp, wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
break;
case Interpreter::java_lang_math_cos :
__ trigfunc('c');
break;
case Interpreter::java_lang_math_tan :
__ trigfunc('t');
break;
case Interpreter::java_lang_math_abs:
__ fabs();
break;
case Interpreter::java_lang_math_log10:
__ flog10();
break;
default :
ShouldNotReachHere();
}
// return double result in xmm0 for interpreter and compilers.
__ subptr(rsp, 2*wordSize);
// Round to 64bit precision
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);
return entry_point;
}

View File

@ -1271,7 +1271,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@ -1284,9 +1284,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
__ block_comment("check GCLocker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
@ -1469,14 +1469,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point

View File

@ -1416,7 +1416,7 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
@ -1428,9 +1428,9 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
__ block_comment("check GCLocker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
@ -1795,14 +1795,14 @@ static void gen_special_dispatch(MacroAssembler* masm,
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped like the tear down of the JNI handle
// block and the check for pending exceptions since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// if (GCLocker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,6 +49,17 @@
#define __ _masm->
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64
// Global Register Names
static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
@ -57,6 +68,7 @@ const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
//-----------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
@ -778,6 +790,30 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
return NULL;
}
// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception. Windows might need this to map the shadow pages though.
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Quick & dirty stack overflow checking: bang the stack & handle trap.
// Note that we do the banging after the frame is setup, since the exception
// handling code expects to find a valid interpreter frame on the stack.
// Doing the banging earlier fails if the caller frame is not an interpreter
// frame.
// (Also, the exception throwing code expects to unlock any synchronized
// method receiver, so do the banging after locking the receiver.)
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
const int page_size = os::vm_page_size();
const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
const int start_page = native_call ? n_shadow_pages : 1;
for (int pages = start_page; pages <= n_shadow_pages; pages++) {
__ bang_stack_with_offset(pages*page_size);
}
}
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
@ -1304,6 +1340,27 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
//
// Generic interpreted method entry to (asm) interpreter
//

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,12 +26,26 @@
#include "asm/macroAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#define __ _masm->
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx,: method
// rcx: temporary
// rdi: pointer to locals
// rsp: end of copied parameters area
__ mov(rcx, rsp);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
__ ret(0);
return entry;
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
@ -301,3 +315,105 @@ address TemplateInterpreterGenerator::generate_Double_doubleToRawLongBits_entry(
return NULL;
}
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
// rcx: scratch
// rsi: sender sp
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
address entry_point = __ pc();
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack: [ ret adr ] <-- rsp
// [ lo(arg) ]
// [ hi(arg) ]
//
__ fld_d(Address(rsp, 1*wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
break;
case Interpreter::java_lang_math_cos :
__ trigfunc('c');
break;
case Interpreter::java_lang_math_tan :
__ trigfunc('t');
break;
case Interpreter::java_lang_math_sqrt:
__ fsqrt();
break;
case Interpreter::java_lang_math_abs:
__ fabs();
break;
case Interpreter::java_lang_math_log:
__ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
}
else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dlog)));
}
__ addptr(rsp, 2 * wordSize);
break;
case Interpreter::java_lang_math_log10:
__ flog10();
// Store to stack to convert 80bit precision back to 64bits
__ push_fTOS();
__ pop_fTOS();
break;
case Interpreter::java_lang_math_pow:
__ fld_d(Address(rsp, 3*wordSize)); // second argument
__ subptr(rsp, 4 * wordSize);
__ fstp_d(Address(rsp, 0));
__ fstp_d(Address(rsp, 2 * wordSize));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dpow)));
}
__ addptr(rsp, 4 * wordSize);
break;
case Interpreter::java_lang_math_exp:
__ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
if (VM_Version::supports_sse2()) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dexp)));
}
__ addptr(rsp, 2*wordSize);
break;
default :
ShouldNotReachHere();
}
// return double result in xmm0 for interpreter and compilers.
if (UseSSE >= 2) {
__ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
// done, result in FPU ST(0) or XMM0
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
return entry_point;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,11 +26,155 @@
#include "asm/macroAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "runtime/arguments.hpp"
#define __ _masm->
#ifdef _WIN64
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
__ mov(c_rarg3, rsp);
// adjust rsp
__ subptr(rsp, 4 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rbx, r14, c_rarg3);
// rax: result handler
// Stack layout:
// rsp: 3 integer or float args (if static first is unused)
// 1 float/double identifiers
// return address
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Do FP first so we can use c_rarg3 as temp
__ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers
for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
XMMRegister floatreg = as_XMMRegister(i+1);
Label isfloatordouble, isdouble, next;
__ testl(c_rarg3, 1 << (i*2)); // Float or Double?
__ jcc(Assembler::notZero, isfloatordouble);
// Do Int register here
switch ( i ) {
case 0:
__ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
__ testl(rscratch1, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
break;
case 1:
__ movptr(c_rarg2, Address(rsp, wordSize));
break;
case 2:
__ movptr(c_rarg3, Address(rsp, 2 * wordSize));
break;
default:
break;
}
__ jmp (next);
__ bind(isfloatordouble);
__ testl(c_rarg3, 1 << ((i*2)+1)); // Double?
__ jcc(Assembler::notZero, isdouble);
// Do Float Here
__ movflt(floatreg, Address(rsp, i * wordSize));
__ jmp(next);
// Do Double here
__ bind(isdouble);
__ movdbl(floatreg, Address(rsp, i * wordSize));
__ bind(next);
}
// restore rsp
__ addptr(rsp, 4 * wordSize);
__ ret(0);
return entry;
}
#else
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
__ mov(c_rarg3, rsp);
// adjust rsp
__ subptr(rsp, 14 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
rbx, r14, c_rarg3);
// rax: result handler
// Stack layout:
// rsp: 5 integer args (if static first is unused)
// 1 float/double identifiers
// 8 double args
// return address
// stack args
// garbage
// expression stack bottom
// bcp (NULL)
// ...
// Do FP first so we can use c_rarg3 as temp
__ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers
for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
const XMMRegister r = as_XMMRegister(i);
Label d, done;
__ testl(c_rarg3, 1 << i);
__ jcc(Assembler::notZero, d);
__ movflt(r, Address(rsp, (6 + i) * wordSize));
__ jmp(done);
__ bind(d);
__ movdbl(r, Address(rsp, (6 + i) * wordSize));
__ bind(done);
}
// Now handle integrals. Only do c_rarg1 if not static.
__ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
__ testl(c_rarg3, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
__ movptr(c_rarg2, Address(rsp, wordSize));
__ movptr(c_rarg3, Address(rsp, 2 * wordSize));
__ movptr(c_rarg4, Address(rsp, 3 * wordSize));
__ movptr(c_rarg5, Address(rsp, 4 * wordSize));
// restore rsp
__ addptr(rsp, 14 * wordSize);
__ ret(0);
return entry;
}
#endif // _WIN64
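Both handlers above receive a float/double identifier word from InterpreterRuntime::slow_signature_handler and use it to decide how to reload each XMM argument register. A host-side C++ sketch, illustrative only, of the two encodings the generated code tests: the Win64 path uses two bits per argument register (bit 2*i set means floating point, bit 2*i+1 set means double), while the other path uses one bit per FP register (set means double, clear means float).

#include <cstdint>
#include <cstdio>

// Win64 layout: two bits per argument register, as tested with
// 1 << (i*2) and 1 << (i*2 + 1) above.
static void decode_win64_identifiers(uint32_t ids, int nregs) {
  for (int i = 0; i < nregs; i++) {
    if (ids & (1u << (2 * i))) {
      printf("xmm%d: %s\n", i + 1, (ids & (1u << (2 * i + 1))) ? "double" : "float");
    } else {
      printf("arg %d: integer register\n", i + 1);
    }
  }
}

// Non-Windows layout: one bit per FP argument register, as tested with
// 1 << i above; set means double, clear means float.
static void decode_sysv_identifiers(uint32_t ids, int nregs) {
  for (int i = 0; i < nregs; i++) {
    printf("xmm%d: %s\n", i, (ids & (1u << i)) ? "double" : "float");
  }
}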
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
@ -193,3 +337,84 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
return NULL;
}
//
// Various method entries
//
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
// rcx: scratch
// r13: sender sp
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
address entry_point = __ pc();
// These don't need a safepoint check because they aren't virtually
// callable. We won't enter these intrinsics from compiled code.
// If in the future we added an intrinsic which was virtually callable
// we'd have to worry about how to safepoint so that this code is used.
// mathematical functions inlined by compiler
// (interpreter must provide identical implementation
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
//
// stack: [ ret adr ] <-- rsp
// [ lo(arg) ]
// [ hi(arg) ]
//
if (kind == Interpreter::java_lang_math_sqrt) {
__ sqrtsd(xmm0, Address(rsp, wordSize));
} else if (kind == Interpreter::java_lang_math_exp) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dexp())));
} else if (kind == Interpreter::java_lang_math_log) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dlog())));
} else if (kind == Interpreter::java_lang_math_pow) {
__ movdbl(xmm1, Address(rsp, wordSize));
__ movdbl(xmm0, Address(rsp, 3 * wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dpow())));
} else {
__ fld_d(Address(rsp, wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
break;
case Interpreter::java_lang_math_cos :
__ trigfunc('c');
break;
case Interpreter::java_lang_math_tan :
__ trigfunc('t');
break;
case Interpreter::java_lang_math_abs:
__ fabs();
break;
case Interpreter::java_lang_math_log10:
__ flog10();
break;
default :
ShouldNotReachHere();
}
// return double result in xmm0 for interpreter and compilers.
__ subptr(rsp, 2*wordSize);
// Round to 64bit precision
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);
return entry_point;
}

View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/globalDefinitions.hpp"
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
return true;
}
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
case T_BOOLEAN: i = 0; break;
case T_CHAR : i = 1; break;
case T_BYTE : i = 2; break;
case T_SHORT : i = 3; break;
case T_INT : i = 4; break;
case T_LONG : i = 5; break;
case T_VOID : i = 6; break;
case T_FLOAT : i = 7; break;
case T_DOUBLE : i = 8; break;
case T_OBJECT : i = 9; break;
case T_ARRAY : i = 9; break;
default : ShouldNotReachHere();
}
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
"index out of bounds");
return i;
}
// Deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
int tempcount,
int extra_args,
int moncount,
int callee_param_count,
int callee_locals,
bool is_top_frame) {
int header_words = InterpreterFrame::header_words;
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
int stack_words = is_top_frame ? max_stack : tempcount;
int callee_extra_locals = callee_locals - callee_param_count;
return header_words + monitor_words + stack_words + callee_extra_locals;
}
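A worked instance of the sum above with illustrative numbers; the header and per-monitor word counts are platform constants, so the values below are assumptions, not the real Zero ones.

#include <cstdio>

int main() {
  const int header_words  = 2;    // assumed InterpreterFrame::header_words
  const int monitor_size  = 2;    // assumed frame::interpreter_frame_monitor_size()
  const int moncount      = 1;
  const int max_stack     = 8;    // used because this is the top frame
  const int callee_locals = 5;
  const int callee_params = 2;

  int words = header_words
            + moncount * monitor_size
            + max_stack
            + (callee_locals - callee_params);
  printf("activation size: %d words\n", words);  // 2 + 2 + 8 + 3 = 15
  return 0;
}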
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
assert(popframe_extra_args == 0, "what to do?");
assert(!is_top_frame || (!callee_locals && !callee_param_count),
"top frame should have no caller");
// This code must exactly match what InterpreterFrame::build
// does (the full InterpreterFrame::build, that is, not the
// one that creates empty frames for the deoptimizer).
//
// interpreter_frame will be filled in. Its size is determined by
// a previous call to the size_activation() method.
//
// Note that tempcount is the current size of the expression
// stack. For top most frames we will allocate a full sized
// expression stack and not the trimmed version that non-top
// frames have.
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
interpreterState istate = interpreter_frame->get_interpreterState();
intptr_t *monitor_base = (intptr_t*) istate;
intptr_t *stack_base = monitor_base - monitor_words;
intptr_t *stack = stack_base - tempcount - 1;
BytecodeInterpreter::layout_interpreterState(istate,
caller,
NULL,
method,
locals,
stack,
stack_base,
monitor_base,
NULL,
is_top_frame);
}
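// Illustrative sketch (editor's note, not part of this change) of the
// addresses computed by layout_activation() above; everything grows downward
// from the interpreterState embedded in the frame:
//
//   monitor_base = (intptr_t*) istate
//   stack_base   = monitor_base - monitor_words
//   stack (tos)  = stack_base - tempcount - 1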
// Helper for (runtime) stack overflow checks
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
return 0;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interp_masm_zero.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -33,8 +32,6 @@
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
@ -68,4 +65,40 @@ const char *BytecodeInterpreter::name_of_field_at_address(address addr) {
return NULL;
}
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
frame* caller,
frame* current,
Method* method,
intptr_t* locals,
intptr_t* stack,
intptr_t* stack_base,
intptr_t* monitor_base,
intptr_t* frame_bottom,
bool is_top_frame) {
istate->set_locals(locals);
istate->set_method(method);
istate->set_self_link(istate);
istate->set_prev_link(NULL);
// thread will be set by a hacky repurposing of frame::patch_pc()
// bcp will be set by vframeArrayElement::unpack_on_stack()
istate->set_constants(method->constants()->cache());
istate->set_msg(BytecodeInterpreter::method_resume);
istate->set_bcp_advance(0);
istate->set_oop_temp(NULL);
istate->set_mdx(NULL);
if (caller->is_interpreted_frame()) {
interpreterState prev = caller->get_interpreterState();
prev->set_callee(method);
if (*prev->bcp() == Bytecodes::_invokeinterface)
prev->set_bcp_advance(5);
else
prev->set_bcp_advance(3);
}
istate->set_callee(NULL);
istate->set_monitor_base((BasicObjectLock *) monitor_base);
istate->set_stack_base(stack_base);
istate->set_stack(stack);
istate->set_stack_limit(stack_base - method->max_stack() - 1);
}
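// Illustrative note (editor's note, not part of this change): the bcp_advance
// values set above correspond to bytecode lengths - invokeinterface occupies
// 5 bytes (opcode, two index bytes, count, zero byte) while invokevirtual,
// invokespecial and invokestatic occupy 3 bytes (opcode plus two index bytes) -
// so the caller's bcp is advanced past the invoke bytecode on resume.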
#endif // CC_INTERP

View File

@ -27,32 +27,12 @@
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreterGenerator.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef CC_INTERP
#include "interpreter/cppInterpreter.hpp"
#endif
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address CppInterpreterGenerator::generate_slow_signature_handler() {
_masm->advance(1);
return (address) InterpreterRuntime::slow_signature_handler;
}
@ -70,6 +50,44 @@ address CppInterpreterGenerator::generate_abstract_entry() {
return generate_entry((address) ShouldNotCallThisEntry());
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
return true;
address CppInterpreterGenerator::generate_empty_entry() {
if (!UseFastEmptyMethods)
return NULL;
return generate_entry((address) CppInterpreter::empty_entry);
}
address CppInterpreterGenerator::generate_accessor_entry() {
if (!UseFastAccessorMethods)
return NULL;
return generate_entry((address) CppInterpreter::accessor_entry);
}
address CppInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * passes that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the normal entry point
// Reference.get could be instrumented by jvmti
return NULL;
}
address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::native_entry);
}
address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -747,92 +747,6 @@ InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {
return (InterpreterFrame *) fp;
}
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
case T_BOOLEAN: i = 0; break;
case T_CHAR : i = 1; break;
case T_BYTE : i = 2; break;
case T_SHORT : i = 3; break;
case T_INT : i = 4; break;
case T_LONG : i = 5; break;
case T_VOID : i = 6; break;
case T_FLOAT : i = 7; break;
case T_DOUBLE : i = 8; break;
case T_OBJECT : i = 9; break;
case T_ARRAY : i = 9; break;
default : ShouldNotReachHere();
}
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
"index out of bounds");
return i;
}
BasicType CppInterpreter::result_type_of(Method* method) {
BasicType t;
switch (method->result_index()) {
case 0 : t = T_BOOLEAN; break;
case 1 : t = T_CHAR; break;
case 2 : t = T_BYTE; break;
case 3 : t = T_SHORT; break;
case 4 : t = T_INT; break;
case 5 : t = T_LONG; break;
case 6 : t = T_VOID; break;
case 7 : t = T_FLOAT; break;
case 8 : t = T_DOUBLE; break;
case 9 : t = T_OBJECT; break;
default: ShouldNotReachHere();
}
assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
"out of step with AbstractInterpreter::BasicType_as_index");
return t;
}
address CppInterpreterGenerator::generate_empty_entry() {
if (!UseFastEmptyMethods)
return NULL;
return generate_entry((address) CppInterpreter::empty_entry);
}
address CppInterpreterGenerator::generate_accessor_entry() {
if (!UseFastAccessorMethods)
return NULL;
return generate_entry((address) CppInterpreter::accessor_entry);
}
address CppInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
// We need to have a routine that generates code to:
// * load the value in the referent field
// * passes that value to the pre-barrier.
//
// In the case of G1 this will record the value of the
// referent in an SATB buffer if marking is active.
// This will cause concurrent marking to mark the referent
// field as live.
Unimplemented();
}
#endif // INCLUDE_ALL_GCS
// If G1 is not enabled then attempt to go through the normal entry point
// Reference.get could be instrumented by jvmti
return NULL;
}
address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::native_entry);
}
address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}
// Deoptimization helpers
InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
@ -858,101 +772,24 @@ InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
return (InterpreterFrame *) fp;
}
int AbstractInterpreter::size_activation(int max_stack,
int tempcount,
int extra_args,
int moncount,
int callee_param_count,
int callee_locals,
bool is_top_frame) {
int header_words = InterpreterFrame::header_words;
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
int stack_words = is_top_frame ? max_stack : tempcount;
int callee_extra_locals = callee_locals - callee_param_count;
return header_words + monitor_words + stack_words + callee_extra_locals;
}
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
assert(popframe_extra_args == 0, "what to do?");
assert(!is_top_frame || (!callee_locals && !callee_param_count),
"top frame should have no caller");
// This code must exactly match what InterpreterFrame::build
// does (the full InterpreterFrame::build, that is, not the
// one that creates empty frames for the deoptimizer).
//
// interpreter_frame will be filled in. Its size is determined by
// a previous call to the size_activation() method.
//
// Note that tempcount is the current size of the expression
// stack. For topmost frames we will allocate a full-sized
// expression stack and not the trimmed version that non-top
// frames have.
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
interpreterState istate = interpreter_frame->get_interpreterState();
intptr_t *monitor_base = (intptr_t*) istate;
intptr_t *stack_base = monitor_base - monitor_words;
intptr_t *stack = stack_base - tempcount - 1;
BytecodeInterpreter::layout_interpreterState(istate,
caller,
NULL,
method,
locals,
stack,
stack_base,
monitor_base,
NULL,
is_top_frame);
}
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
frame* caller,
frame* current,
Method* method,
intptr_t* locals,
intptr_t* stack,
intptr_t* stack_base,
intptr_t* monitor_base,
intptr_t* frame_bottom,
bool is_top_frame) {
istate->set_locals(locals);
istate->set_method(method);
istate->set_self_link(istate);
istate->set_prev_link(NULL);
// thread will be set by a hacky repurposing of frame::patch_pc()
// bcp will be set by vframeArrayElement::unpack_on_stack()
istate->set_constants(method->constants()->cache());
istate->set_msg(BytecodeInterpreter::method_resume);
istate->set_bcp_advance(0);
istate->set_oop_temp(NULL);
istate->set_mdx(NULL);
if (caller->is_interpreted_frame()) {
interpreterState prev = caller->get_interpreterState();
prev->set_callee(method);
if (*prev->bcp() == Bytecodes::_invokeinterface)
prev->set_bcp_advance(5);
else
prev->set_bcp_advance(3);
BasicType CppInterpreter::result_type_of(Method* method) {
BasicType t;
switch (method->result_index()) {
case 0 : t = T_BOOLEAN; break;
case 1 : t = T_CHAR; break;
case 2 : t = T_BYTE; break;
case 3 : t = T_SHORT; break;
case 4 : t = T_INT; break;
case 5 : t = T_LONG; break;
case 6 : t = T_VOID; break;
case 7 : t = T_FLOAT; break;
case 8 : t = T_DOUBLE; break;
case 9 : t = T_OBJECT; break;
default: ShouldNotReachHere();
}
istate->set_callee(NULL);
istate->set_monitor_base((BasicObjectLock *) monitor_base);
istate->set_stack_base(stack_base);
istate->set_stack(stack);
istate->set_stack_limit(stack_base - method->max_stack() - 1);
assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
"out of step with AbstractInterpreter::BasicType_as_index");
return t;
}
address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
@ -964,12 +801,6 @@ address CppInterpreter::deopt_entry(TosState state, int length) {
return NULL;
}
// Helper for (runtime) stack overflow checks
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
return 0;
}
// Helper for figuring out if frames are interpreter frames
bool CppInterpreter::contains(address pc) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,9 +25,16 @@
#include "precompiled.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "runtime/thread.hpp"
#include "stack_zero.hpp"
#include "stack_zero.inline.hpp"
// Inlined causes circular inclusion with thread.hpp
ZeroStack::ZeroStack()
: _base(NULL), _top(NULL), _sp(NULL) {
_shadow_pages_size = JavaThread::stack_shadow_zone_size();
}
int ZeroStack::suggest_size(Thread *thread) const {
assert(needs_setup(), "already set up");
int abi_available = abi_stack_available(thread);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -38,10 +38,7 @@ class ZeroStack {
int _shadow_pages_size; // how much ABI stack must we keep free?
public:
ZeroStack()
: _base(NULL), _top(NULL), _sp(NULL) {
_shadow_pages_size = JavaThread::stack_shadow_zone_size();
}
ZeroStack();
bool needs_setup() const {
return _base == NULL;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -49,11 +49,10 @@ inline void ZeroStack::overflow_check(int required_words, TRAPS) {
// value can be negative.
inline int ZeroStack::abi_stack_available(Thread *thread) const {
guarantee(Thread::current() == thread, "should run in the same thread");
assert(thread->stack_size() -
(thread->stack_base() - (address) &stack_used +
JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size()) ==
(address)&stack_used - thread->stack_overflow_limit(), "sanity");
return (address)&stack_used - stack_overflow_limit();
int stack_used = thread->stack_base() - (address) &stack_used
+ (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());
int stack_free = thread->stack_size() - stack_used;
return stack_free;
}
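// Illustrative note (editor's note, not part of this change): &stack_used is
// the address of the local variable itself and thus approximates the current
// stack pointer, so thread->stack_base() - (address)&stack_used is how far the
// ABI stack has grown; the guard and shadow zones are counted as used because
// they must remain untouched.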
#endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP

View File

@ -171,6 +171,7 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
if (strlen(libname) >= sizeof(newlib->name)) {
print_debug("libname %s too long\n", libname);
free(newlib);
return NULL;
}
strcpy(newlib->name, libname);

View File

@ -217,6 +217,7 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
if (strlen(libname) >= sizeof(newlib->name)) {
print_debug("libname %s too long\n", libname);
free(newlib);
return NULL;
}
strcpy(newlib->name, libname);

View File

@ -36,13 +36,15 @@ class AIXDecoder: public AbstractDecoder {
virtual bool can_decode_C_frame_in_vm() const { return true; }
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // demangled by getFuncName
virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0, demangle) == 0);
return AixSymbols::get_function_name(addr, buf, buflen, offset, 0, demangle);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();
return false;
}
};

View File

@ -41,7 +41,6 @@
fputc('\n', stderr); fflush(stderr); \
} \
}
#define ERRBYE(s) { trcVerbose(s); return -1; }
#define assert0(b) assert((b), "")
#define guarantee0(b) guarantee((b), "")

View File

@ -130,61 +130,10 @@ extern "C" int getargs (procsinfo*, int, char*, int);
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (eg getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (eg os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;
// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
static address resolve_function_descriptor_to_code_pointer(address p);
// Function to check a given stack pointer against given stack limits.
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
if (((uintptr_t)sp) & 0x7) {
return false;
}
if (sp > stack_base) {
return false;
}
if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
return false;
}
return true;
}
// Returns true if function is a valid codepointer.
inline bool is_valid_codepointer(codeptr_t p) {
if (!p) {
return false;
}
if (((uintptr_t)p) & 0x3) {
return false;
}
if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
return false;
}
return true;
}
// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}
// Macro to check the current stack pointer against given stacklimits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
address sp; \
sp = os::current_stack_pointer(); \
CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
static void vmembk_print_on(outputStream* os);
////////////////////////////////////////////////////////////////////////////////
@ -859,9 +808,6 @@ static void *java_start(Thread *thread) {
trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
}
// Do some sanity checks.
CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
// Try to randomize the cache line index of hot stack frames.
// This helps when threads of the same stack traces evict each other's
// cache lines. The threads can be either from the same JVM instance, or
@ -1028,9 +974,6 @@ bool os::create_attached_thread(JavaThread* thread) {
// initialize floating point control register
os::Aix::init_thread_fpu_state();
// some sanity checks
CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
// Initial thread state is RUNNABLE
osthread->set_state(RUNNABLE);
@ -1382,32 +1325,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
return false;
}
// Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
return Decoder::decode(addr, buf, buflen, offset, demangle);
}
static int getModuleName(codeptr_t pc, // [in] program counter
char* p_name, size_t namelen, // [out] optional: function name
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
) {
if (p_name && namelen > 0) {
*p_name = '\0';
}
if (p_errmsg && errmsglen > 0) {
*p_errmsg = '\0';
}
if (p_name && namelen > 0) {
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
strncpy(p_name, lm.shortname, namelen);
p_name[namelen - 1] = '\0';
}
return 0;
}
return -1;
return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
}
bool os::dll_address_to_library_name(address addr, char* buf,
@ -1425,10 +1343,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
return false;
}
if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
return true;
}
return false;
return AixSymbols::get_module_name(addr, buf, buflen);
}
// Loads .dll/.so and in case of error it checks if .dll/.so was built
@ -3827,7 +3742,7 @@ bool os::find(address addr, outputStream* st) {
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
st->print("%s", lm.path);
st->print_cr("%s", lm.path);
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -65,7 +65,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
}
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
inline void os::map_stack_shadow_pages() {
}
inline void os::dll_unload(void *lib) {

View File

@ -23,30 +23,35 @@
*/
#include "asm/assembler.hpp"
#include "compiler/disassembler.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
// For CritSect
#include "misc_aix.hpp"
#include "porting_aix.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include <demangle.h>
#include <sys/debug.h>
#include <ucontext.h>
//////////////////////////////////
// Provide implementation for dladdr based on LoadedLibraries pool and
// traceback table scan (see getFuncName).
// traceback table scan
// Search traceback table in stack,
// return procedure name from trace back table.
#define MAX_FUNC_SEARCH_LEN 0x10000
// Any PC below this value is considered toast.
#define MINIMUM_VALUE_FOR_PC ((unsigned int*)0x1024)
#define PTRDIFF_BYTES(p1,p2) (((ptrdiff_t)p1) - ((ptrdiff_t)p2))
// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;
typedef unsigned int* codeptr_t;
// Unfortunately, the interface of dladdr makes the implementator
// responsible for maintaining memory for function name/library
// name. I guess this is because most OS's keep those values as part
@ -91,15 +96,12 @@ class fixed_strings {
static fixed_strings dladdr_fixed_strings;
// Given a code pointer, returns the function name and the displacement.
// Function looks for the traceback table at the end of the function.
extern "C" int getFuncName(
codeptr_t pc, // [in] program counter
bool AixSymbols::get_function_name (
address pc0, // [in] program counter
char* p_name, size_t namelen, // [out] optional: function name ("" if not available)
int* p_displacement, // [out] optional: displacement (-1 if not available)
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further
// information (NULL if not available)
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
bool demangle // [in] whether to demangle the name
) {
struct tbtable* tb = 0;
@ -109,9 +111,6 @@ extern "C" int getFuncName(
if (p_name && namelen > 0) {
*p_name = '\0';
}
if (p_errmsg && errmsglen > 0) {
*p_errmsg = '\0';
}
if (p_displacement) {
*p_displacement = -1;
}
@ -119,9 +118,12 @@ extern "C" int getFuncName(
*p_tb = NULL;
}
codeptr_t pc = (codeptr_t)pc0;
// weed out obvious bogus states
if (pc < MINIMUM_VALUE_FOR_PC) {
ERRBYE("invalid program counter");
if (pc < (codeptr_t)0x1000) {
trcVerbose("invalid program counter");
return false;
}
// We have seen random but frequent crashes in this function for some months now, mainly on shutdown
@ -130,7 +132,8 @@ extern "C" int getFuncName(
// As the pc cannot be trusted to be anything sensible, let's make all reads via SafeFetch. Also
// bail if this is not a text address right now.
if (!LoadedLibraries::find_for_text_address(pc, NULL)) {
ERRBYE("not a text address");
trcVerbose("not a text address");
return false;
}
// .. (Note that is_readable_pointer returns true if safefetch stubs are not there yet;
@ -138,10 +141,11 @@ extern "C" int getFuncName(
// error files than not having a callstack.)
#define CHECK_POINTER_READABLE(p) \
if (!MiscUtils::is_readable_pointer(p)) { \
ERRBYE("pc not readable"); \
trcVerbose("pc not readable"); \
return false; \
}
codeptr_t pc2 = pc;
codeptr_t pc2 = (codeptr_t) pc;
// Make sure the pointer is word aligned.
pc2 = (codeptr_t) align_ptr_up((char*)pc2, 4);
@ -154,7 +158,8 @@ extern "C" int getFuncName(
pc2++;
}
if (*pc2 != 0) {
ERRBYE("no traceback table found");
trcVerbose("no traceback table found");
return false;
}
//
// Set up addressability to the traceback table
@ -166,7 +171,8 @@ extern "C" int getFuncName(
if (tb->tb.lang >= 0xf && tb->tb.lang <= 0xfb) {
// Language specifiers, go from 0 (C) to 14 (Objective C).
// According to spec, 0xf-0xfa reserved, 0xfb-0xff reserved for ibm.
ERRBYE("no traceback table found");
trcVerbose("no traceback table found");
return false;
}
// Existence of fields in the tbtable extension are contingent upon
@ -188,7 +194,8 @@ extern "C" int getFuncName(
// Weed out the cases where we did find the wrong traceback table.
if (pc < start_of_procedure) {
ERRBYE("no traceback table found");
trcVerbose("no traceback table found");
return false;
}
// return the displacement
@ -218,23 +225,19 @@ extern "C" int getFuncName(
if (p_name && namelen > 0) {
if (tb->tb.name_present) {
// Copy name from text because it may not be zero terminated.
// 256 is good enough for most cases; do not use large buffers here.
char buf[256];
const short l = MIN2<short>(*((short*)pc2), sizeof(buf) - 1);
const short l = MIN2<short>(*((short*)pc2), namelen - 1);
// Be very careful.
int i = 0; char* const p = (char*)pc2 + sizeof(short);
while (i < l && MiscUtils::is_readable_pointer(p + i)) {
buf[i] = p[i];
p_name[i] = p[i];
i++;
}
buf[i] = '\0';
p_name[0] = '\0';
p_name[i] = '\0';
// If it is a C++ name, try and demangle it using the Demangle interface (see demangle.h).
if (demangle) {
char* rest;
Name* const name = Demangle(buf, rest);
Name* const name = Demangle(p_name, rest);
if (name) {
const char* const demangled_name = name->Text();
if (demangled_name) {
@ -244,24 +247,35 @@ extern "C" int getFuncName(
delete name;
}
}
// Fallback: if demangling did not work, just provide the unmangled name.
if (p_name[0] == '\0') {
strncpy(p_name, buf, namelen-1);
p_name[namelen-1] = '\0';
}
} else {
strncpy(p_name, "<nameless function>", namelen-1);
p_name[namelen-1] = '\0';
}
}
// Return traceback table, if user wants it.
if (p_tb) {
(*p_tb) = tb;
}
return 0;
return true;
}
bool AixSymbols::get_module_name(address pc,
char* p_name, size_t namelen) {
if (p_name && namelen > 0) {
p_name[0] = '\0';
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
strncpy(p_name, lm.shortname, namelen);
p_name[namelen - 1] = '\0';
return true;
}
}
return false;
}
// Special implementation of dladdr for Aix based on LoadedLibraries
@ -341,8 +355,8 @@ int dladdr(void* addr, Dl_info* info) {
char funcname[256] = "";
int displacement = 0;
if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
NULL, NULL, 0, false) == 0) {
if (AixSymbols::get_function_name(p, funcname, sizeof(funcname),
&displacement, NULL, true)) {
if (funcname[0] != '\0') {
const char* const interned = dladdr_fixed_strings.intern(funcname);
info->dli_sname = interned;
@ -385,3 +399,414 @@ int dladdr(void* addr, Dl_info* info) {
return rc; // error: return 0 [sic]
}
/////////////////////////////////////////////////////////////////////////////
// Native callstack dumping
// Print the traceback table for one stack frame.
static void print_tbtable (outputStream* st, const struct tbtable* p_tb) {
if (p_tb == NULL) {
st->print("<null>");
return;
}
switch(p_tb->tb.lang) {
case TB_C: st->print("C"); break;
case TB_FORTRAN: st->print("FORTRAN"); break;
case TB_PASCAL: st->print("PASCAL"); break;
case TB_ADA: st->print("ADA"); break;
case TB_PL1: st->print("PL1"); break;
case TB_BASIC: st->print("BASIC"); break;
case TB_LISP: st->print("LISP"); break;
case TB_COBOL: st->print("COBOL"); break;
case TB_MODULA2: st->print("MODULA2"); break;
case TB_CPLUSPLUS: st->print("C++"); break;
case TB_RPG: st->print("RPG"); break;
case TB_PL8: st->print("PL8"); break;
case TB_ASM: st->print("ASM"); break;
case TB_HPJ: st->print("HPJ"); break;
default: st->print("unknown");
}
st->print(" ");
if (p_tb->tb.globallink) {
st->print("globallink ");
}
if (p_tb->tb.is_eprol) {
st->print("eprol ");
}
if (p_tb->tb.int_proc) {
st->print("int_proc ");
}
if (p_tb->tb.tocless) {
st->print("tocless ");
}
if (p_tb->tb.fp_present) {
st->print("fp_present ");
}
if (p_tb->tb.int_hndl) {
st->print("interrupt_handler ");
}
if (p_tb->tb.uses_alloca) {
st->print("uses_alloca ");
}
if (p_tb->tb.saves_cr) {
st->print("saves_cr ");
}
if (p_tb->tb.saves_lr) {
st->print("saves_lr ");
}
if (p_tb->tb.stores_bc) {
st->print("stores_bc ");
}
if (p_tb->tb.fixup) {
st->print("fixup ");
}
if (p_tb->tb.fpr_saved > 0) {
st->print("fpr_saved:%d ", p_tb->tb.fpr_saved);
}
if (p_tb->tb.gpr_saved > 0) {
st->print("gpr_saved:%d ", p_tb->tb.gpr_saved);
}
if (p_tb->tb.fixedparms > 0) {
st->print("fixedparms:%d ", p_tb->tb.fixedparms);
}
if (p_tb->tb.floatparms > 0) {
st->print("floatparms:%d ", p_tb->tb.floatparms);
}
if (p_tb->tb.parmsonstk > 0) {
st->print("parmsonstk:%d", p_tb->tb.parmsonstk);
}
}
// Print information for pc (module, function, displacement, traceback table)
// on one line.
static void print_info_for_pc (outputStream* st, codeptr_t pc, char* buf,
size_t buf_size, bool demangle) {
const struct tbtable* tb = NULL;
int displacement = -1;
if (!MiscUtils::is_readable_pointer(pc)) {
st->print("(invalid)");
return;
}
if (AixSymbols::get_module_name((address)pc, buf, buf_size)) {
st->print("%s", buf);
} else {
st->print("(unknown module)");
}
st->print("::");
if (AixSymbols::get_function_name((address)pc, buf, buf_size,
&displacement, &tb, demangle)) {
st->print("%s", buf);
} else {
st->print("(unknown function)");
}
if (displacement == -1) {
st->print("+?");
} else {
st->print("+0x%x", displacement);
}
if (tb) {
st->fill_to(64);
st->print(" (");
print_tbtable(st, tb);
st->print(")");
}
}
static void print_stackframe(outputStream* st, stackptr_t sp, char* buf,
size_t buf_size, bool demangle) {
stackptr_t sp2 = sp;
// skip backchain
sp2++;
// skip crsave
sp2++;
// retrieve lrsave. That is the only info I need to get the function/displacement
codeptr_t lrsave = (codeptr_t) *(sp2);
st->print (PTR64_FORMAT " - " PTR64_FORMAT " ", sp2, lrsave);
if (lrsave != NULL) {
print_info_for_pc(st, lrsave, buf, buf_size, demangle);
}
}
// Function to check a given stack pointer against given stack limits.
static bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
if (((uintptr_t)sp) & 0x7) {
return false;
}
if (sp > stack_base) {
return false;
}
if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
return false;
}
return true;
}
// Returns true if function is a valid codepointer.
static bool is_valid_codepointer(codeptr_t p) {
if (!p) {
return false;
}
if (((uintptr_t)p) & 0x3) {
return false;
}
if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
return false;
}
return true;
}
// Function tries to guess if the given combination of stack pointer, stack base
// and stack size is a valid stack frame.
static bool is_valid_frame (stackptr_t p, stackptr_t stack_base, size_t stack_size) {
if (!is_valid_stackpointer(p, stack_base, stack_size)) {
return false;
}
// First check - the occurrence of a valid backchain pointer up the stack, followed by a
// valid codeptr, counts as a good candidate.
stackptr_t sp2 = (stackptr_t) *p;
if (is_valid_stackpointer(sp2, stack_base, stack_size) && // found a valid stack pointer in the stack...
((sp2 - p) > 6) && // ... pointing upwards and not into my frame...
is_valid_codepointer((codeptr_t)(*(sp2 + 2)))) // ... followed by a code pointer after two slots...
{
return true;
}
return false;
}
// Try to relocate a stack back chain in a given stack.
// Used in callstack dumping, when the backchain is broken by an overwriter
static stackptr_t try_find_backchain (stackptr_t last_known_good_frame,
stackptr_t stack_base, size_t stack_size)
{
if (!is_valid_stackpointer(last_known_good_frame, stack_base, stack_size)) {
return NULL;
}
stackptr_t sp = last_known_good_frame;
sp += 6; // Omit next fixed frame slots.
while (sp < stack_base) {
if (is_valid_frame(sp, stack_base, stack_size)) {
return sp;
}
sp ++;
}
return NULL;
}
static void decode_instructions_at_pc(const char* header,
codeptr_t pc, int num_before,
int num_after, outputStream* st) {
// TODO: PPC port Disassembler::decode(pc, 16, 16, st);
}
void AixNativeCallstack::print_callstack_for_context(outputStream* st, const ucontext_t* context,
bool demangle, char* buf, size_t buf_size) {
#define MAX_CALLSTACK_DEPTH 50
unsigned long* sp;
unsigned long* sp_last;
int frame;
// To print the first frame, use the current value of iar:
// current entry indicated by iar (the current pc)
codeptr_t cur_iar = 0;
stackptr_t cur_sp = 0;
codeptr_t cur_rtoc = 0;
codeptr_t cur_lr = 0;
const ucontext_t* uc = (const ucontext_t*) context;
// fallback: use the current context
ucontext_t local_context;
if (!uc) {
if (getcontext(&local_context) == 0) {
uc = &local_context;
} else {
st->print_cr("No context given and getcontext failed. ");
return;
}
}
cur_iar = (codeptr_t)uc->uc_mcontext.jmp_context.iar;
cur_sp = (stackptr_t)uc->uc_mcontext.jmp_context.gpr[1];
cur_rtoc = (codeptr_t)uc->uc_mcontext.jmp_context.gpr[2];
cur_lr = (codeptr_t)uc->uc_mcontext.jmp_context.lr;
// syntax used here:
// n -------------- <-- stack_base, stack_to
// n-1 | |
// ... | older |
// ... | frames | |
// | | | stack grows downward
// ... | younger | |
// ... | frames | V
// | |
// |------------| <-- cur_sp, current stack ptr
// | |
// | unused |
// | stack |
// | |
// . .
// . .
// . .
// . .
// | |
// 0 -------------- <-- stack_from
//
// Retrieve current stack base, size from the current thread. If there is none,
// retrieve it from the OS.
stackptr_t stack_base = NULL;
size_t stack_size = 0;
Thread* const thread = Thread::current_or_null_safe();
if (thread) {
stack_base = (stackptr_t) thread->stack_base();
stack_size = thread->stack_size();
} else {
stack_base = (stackptr_t) os::current_stack_base();
stack_size = os::current_stack_size();
}
st->print_cr("------ current frame:");
st->print("iar: " PTR64_FORMAT " ", p2i(cur_iar));
print_info_for_pc(st, cur_iar, buf, buf_size, demangle);
st->cr();
if (cur_iar && MiscUtils::is_readable_pointer(cur_iar)) {
decode_instructions_at_pc(
"Decoded instructions at iar:",
cur_iar, 32, 16, st);
}
// Print out lr too, which may be interesting if we did jump to some bogus location;
// in those cases the new frame is not built up yet and the caller location is only
// preserved via lr register.
st->print("lr: " PTR64_FORMAT " ", p2i(cur_lr));
print_info_for_pc(st, cur_lr, buf, buf_size, demangle);
st->cr();
if (cur_lr && MiscUtils::is_readable_pointer(cur_lr)) {
decode_instructions_at_pc(
"Decoded instructions at lr:",
cur_lr, 32, 16, st);
}
// Check and print sp.
st->print("sp: " PTR64_FORMAT " ", p2i(cur_sp));
if (!is_valid_stackpointer(cur_sp, stack_base, stack_size)) {
st->print("(invalid) ");
goto cleanup;
} else {
st->print("(base - 0x%X) ", PTRDIFF_BYTES(stack_base, cur_sp));
}
st->cr();
// Check and print rtoc.
st->print("rtoc: " PTR64_FORMAT " ", p2i(cur_rtoc));
if (cur_rtoc == NULL || cur_rtoc == (codeptr_t)-1 ||
!MiscUtils::is_readable_pointer(cur_rtoc)) {
st->print("(invalid)");
} else if (((uintptr_t)cur_rtoc) & 0x7) {
st->print("(unaligned)");
}
st->cr();
st->print_cr("|---stackaddr----| |----lrsave------|: <function name>");
///
// Walk callstack.
//
// (if no context was given, use the current stack)
sp = (unsigned long*)(*(unsigned long*)cur_sp); // Stack pointer
sp_last = cur_sp;
frame = 0;
while (frame < MAX_CALLSTACK_DEPTH) {
// Check sp.
bool retry = false;
if (sp == NULL) {
// The backchain pointer was NULL. This normally means the end of the chain. But the
// stack might be corrupted, and it may be worth looking for the stack chain.
if (is_valid_stackpointer(sp_last, stack_base, stack_size) && (stack_base - 0x10) > sp_last) {
// If we are not within <guess> 0x10 stackslots of the stack base, we assume that this
// is indeed not the end of the chain but that the stack was corrupted. So lets try to
// find the end of the chain.
st->print_cr("*** back chain pointer is NULL - end of stack or broken backchain ? ***");
retry = true;
} else {
st->print_cr("*** end of backchain ***");
goto end_walk_callstack;
}
} else if (!is_valid_stackpointer(sp, stack_base, stack_size)) {
st->print_cr("*** stack pointer invalid - backchain corrupted (" PTR_FORMAT ") ***", p2i(sp));
retry = true;
} else if (sp < sp_last) {
st->print_cr("invalid stack pointer: " PTR_FORMAT " (not monotone raising)", p2i(sp));
retry = true;
}
// If backchain is broken, try to recover, by manually scanning the stack for a pattern
// which looks like a valid stack.
if (retry) {
st->print_cr("trying to recover and find backchain...");
sp = try_find_backchain(sp_last, stack_base, stack_size);
if (sp) {
st->print_cr("found something which looks like a backchain at " PTR64_FORMAT ", after 0x%x bytes... ",
p2i(sp), PTRDIFF_BYTES(sp, sp_last));
} else {
st->print_cr("did not find a backchain, giving up.");
goto end_walk_callstack;
}
}
// Print stackframe.
print_stackframe(st, sp, buf, buf_size, demangle);
st->cr();
frame ++;
// Next stack frame and link area.
sp_last = sp;
sp = (unsigned long*)(*sp);
}
// Prevent endless loops in case of invalid callstacks.
if (frame == MAX_CALLSTACK_DEPTH) {
st->print_cr("...(stopping after %d frames.", MAX_CALLSTACK_DEPTH);
}
end_walk_callstack:
st->print_cr("-----------------------");
cleanup:
return;
}

View File

@ -61,24 +61,37 @@ extern "C"
#endif
int dladdr(void *addr, Dl_info *info);
typedef unsigned int* codeptr_t;
struct tbtable;
// helper function - given a program counter, tries to locate the traceback table and
// returns info from it (like, most importantly, function name, displacement of the
// pc inside the function, and the traceback table itself.
#ifdef __cplusplus
extern "C"
#endif
int getFuncName(
codeptr_t pc, // [in] program counter
char* p_name, size_t namelen, // [out] optional: user provided buffer for the function name
int* p_displacement, // [out] optional: displacement
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
char* p_errmsg, size_t errmsglen, // [out] optional: user provided buffer for error messages
bool demangle // [in] whether to demangle the name
);
class AixSymbols {
public:
// Given a program counter, tries to locate the traceback table and returns info from
// it - e.g. function name, displacement of the pc inside the function, and the traceback
// table itself.
static bool get_function_name (
address pc, // [in] program counter
char* p_name, size_t namelen, // [out] optional: user provided buffer for the function name
int* p_displacement, // [out] optional: displacement
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
bool demangle // [in] whether to demangle the name
);
// Given a program counter, returns the name of the module (library and module) the pc points to
static bool get_module_name (
address pc, // [in] program counter
char* p_name, size_t namelen // [out] module name
);
};
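// Illustrative usage sketch (editor's example, not part of this change),
// assuming 'pc' is a valid text address inside a loaded module:
//
//   char name[256];
//   int displacement = -1;
//   if (AixSymbols::get_function_name(pc, name, sizeof(name),
//                                     &displacement, NULL, true)) {
//     // name holds the (demangled) function name, displacement the offset
//     // of pc within it.
//   }
//   char module[256];
//   if (AixSymbols::get_module_name(pc, module, sizeof(module))) {
//     // module holds the short name of the containing library.
//   }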
class AixNativeCallstack {
public:
static void print_callstack_for_context(outputStream* st, const ucontext_t* uc,
bool demangle,
char* buf, size_t buf_size);
};
#endif // OS_AIX_VM_PORTING_AIX_HPP

View File

@ -3481,7 +3481,7 @@ jint os::init_2(void) {
os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
2*BytesPerWord COMPILER2_PRESENT(+1) * Bsd::page_size());
(2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
inline void os::map_stack_shadow_pages() {
}
inline void os::dll_unload(void *lib) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
inline void os::map_stack_shadow_pages() {
}
inline void os::dll_unload(void *lib) {

View File

@ -819,19 +819,19 @@ static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
void os::Solaris::hotspot_sigmask(Thread* thread) {
//Save caller's signal mask
sigset_t sigmask;
thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
pthread_sigmask(SIG_SETMASK, NULL, &sigmask);
OSThread *osthread = thread->osthread();
osthread->set_caller_sigmask(sigmask);
thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
pthread_sigmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
if (!ReduceSignalUsage) {
if (thread->is_VM_thread()) {
// Only the VM thread handles BREAK_SIGNAL ...
thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
} else {
// ... all other threads block BREAK_SIGNAL
assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
}
}
}
@ -1188,7 +1188,7 @@ void os::free_thread(OSThread* osthread) {
if (Thread::current()->osthread() == osthread) {
// Restore caller's signal mask
sigset_t sigmask = osthread->caller_sigmask();
thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
}
delete osthread;
}
@ -3561,7 +3561,7 @@ void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
sigset_t suspend_set; // signals for sigsuspend()
// get current set of blocked signals and unblock resume signal
thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, os::Solaris::SIGasync());
sr_semaphore.signal();
@ -3838,7 +3838,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
// try to honor the signal mask
sigset_t oset;
thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
// call into the chained handler
if (siginfo_flag_set) {
@ -3848,7 +3848,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
}
// restore the signal mask
thr_sigsetmask(SIG_SETMASK, &oset, 0);
pthread_sigmask(SIG_SETMASK, &oset, 0);
}
// Tell jvm's signal handler the signal is taken care of.
return true;
@ -4415,7 +4415,7 @@ jint os::init_2(void) {
os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
2*BytesPerWord COMPILER2_PRESENT(+1) * page_size);
(2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
@ -5492,7 +5492,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// (This allows a debugger to break into the running thread.)
sigset_t oldsigs;
sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
@ -5519,7 +5519,7 @@ void Parker::park(bool isAbsolute, jlong time) {
status, "cond_timedwait");
#ifdef ASSERT
thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
@ -5667,8 +5667,6 @@ bool os::is_headless_jre() {
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
size_t res;
assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
"Assumed _thread_in_native");
RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
return res;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
inline void os::map_stack_shadow_pages() {
}
inline void os::dll_unload(void *lib) { ::dlclose(lib); }

View File

@ -3318,6 +3318,35 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
}
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
uint count = 0;
bool ret = false;
size_t bytes_remaining = bytes;
char * next_protect_addr = addr;
// Use VirtualQuery() to get the chunk size.
while (bytes_remaining) {
MEMORY_BASIC_INFORMATION alloc_info;
if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
return false;
}
size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
// We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
// but we don't distinguish here as both cases are protected by same API.
ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
warning("Failed protecting pages individually for chunk #%u", count);
if (!ret) {
return false;
}
bytes_remaining -= bytes_to_protect;
next_protect_addr += bytes_to_protect;
count++;
}
return ret;
}
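// Illustrative note (editor's note, not part of this change): VirtualQuery()
// reports, via MEMORY_BASIC_INFORMATION::RegionSize, the size of the region of
// pages sharing the same allocation attributes, which is why the loop above
// can advance and protect one allocation chunk at a time.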
// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed) {
@ -3345,7 +3374,25 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
// Pages in the region become guard pages. Any attempt to access a guard page
// causes the system to raise a STATUS_GUARD_PAGE exception and turn off
// the guard page status. Guard pages thus act as a one-time access alarm.
return VirtualProtect(addr, bytes, p, &old_status) != 0;
bool ret;
if (UseNUMAInterleaving) {
// If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
// so we must protect the chunks individually.
ret = protect_pages_individually(addr, bytes, p, &old_status);
} else {
ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
}
#ifdef ASSERT
if (!ret) {
int err = os::get_last_error();
char buf[256];
size_t buf_len = os::lasterror(buf, sizeof(buf));
warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
buf_len != 0 ? buf : "<no_error_string>", err);
}
#endif
return ret;
}
bool os::guard_memory(char* addr, size_t bytes) {
@ -3768,6 +3815,7 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}
#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
@ -3786,7 +3834,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// _endthreadex().
// Should be large enough to avoid blocking the exiting thread due to lack of
// a free slot.
static HANDLE handles[MAXIMUM_WAIT_OBJECTS];
static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
static int handle_count = 0;
static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
@ -3800,6 +3848,11 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
} else if (OrderAccess::load_acquire(&process_exiting) == 0) {
if (what != EPT_THREAD) {
// Atomically set process_exiting before the critical section
// to increase the visibility between racing threads.
Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
}
EnterCriticalSection(&crit_sect);
if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
@ -3820,14 +3873,14 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// If there's no free slot in the array of the kept handles, we'll have to
// wait until at least one thread completes exiting.
if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) {
if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
// Raise the priority of the oldest exiting thread to increase its chances
// to complete sooner.
SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
i = (res - WAIT_OBJECT_0);
handle_count = MAXIMUM_WAIT_OBJECTS - 1;
handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
for (; i < handle_count; ++i) {
handles[i] = handles[i + 1];
}
@ -3836,7 +3889,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
// Don't keep handles, if we failed waiting for them.
for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) {
for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
CloseHandle(handles[i]);
}
handle_count = 0;
@ -3857,42 +3910,59 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// The current exiting thread has stored its handle in the array, and now
// should leave the critical section before calling _endthreadex().
} else if (what != EPT_THREAD) {
if (handle_count > 0) {
// Before ending the process, make sure all the threads that had called
// _endthreadex() completed.
} else if (what != EPT_THREAD && handle_count > 0) {
jlong start_time, finish_time, timeout_left;
// Before ending the process, make sure all the threads that had called
// _endthreadex() completed.
// Set the priority level of the current thread to the same value as
// the priority level of exiting threads.
// This is to ensure it will be given a fair chance to execute if
// the timeout expires.
hthr = GetCurrentThread();
SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
for (i = 0; i < handle_count; ++i) {
SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
// Set the priority level of the current thread to the same value as
// the priority level of exiting threads.
// This is to ensure it will be given a fair chance to execute if
// the timeout expires.
hthr = GetCurrentThread();
SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
start_time = os::javaTimeNanos();
finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
for (i = 0; ; ) {
int portion_count = handle_count - i;
if (portion_count > MAXIMUM_WAIT_OBJECTS) {
portion_count = MAXIMUM_WAIT_OBJECTS;
}
res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
for (j = 0; j < portion_count; ++j) {
SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
}
timeout_left = (finish_time - start_time) / 1000000L;
if (timeout_left < 0) {
timeout_left = 0;
}
res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
// Reset portion_count so we close the remaining
// handles due to this error.
portion_count = handle_count - i;
}
for (i = 0; i < handle_count; ++i) {
CloseHandle(handles[i]);
for (j = 0; j < portion_count; ++j) {
CloseHandle(handles[i + j]);
}
handle_count = 0;
if ((i += portion_count) >= handle_count) {
break;
}
start_time = os::javaTimeNanos();
}
OrderAccess::release_store(&process_exiting, 1);
handle_count = 0;
}
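// Illustrative note (editor's note, not part of this change):
// WaitForMultipleObjects() can wait on at most MAXIMUM_WAIT_OBJECTS (64)
// handles per call, which is why the loop above processes the kept thread
// handles in portions of at most 64 and carries the remaining EXIT_TIMEOUT
// budget across portions.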
LeaveCriticalSection(&crit_sect);
}
if (what == EPT_THREAD) {
while (OrderAccess::load_acquire(&process_exiting) != 0) {
// Some other thread is about to call exit(), so we
// don't let the current thread proceed to _endthreadex()
if (OrderAccess::load_acquire(&process_exiting) != 0 &&
process_exiting != (jint)GetCurrentThreadId()) {
// Some other thread is about to call exit(), so we
// don't let the current thread proceed to exit() or _endthreadex()
while (true) {
SuspendThread(GetCurrentThread());
// Avoid busy-wait loop, if SuspendThread() failed.
Sleep(EXIT_TIMEOUT);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ inline int os::readdir_buf_size(const char *path)
}
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
inline void os::map_stack_shadow_pages() {
// Write to each page of our new frame to force OS mapping.
// If we decrement stack pointer more than one page
// the OS may not map an intervening page into our space

View File

@ -40,6 +40,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "porting_aix.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
@ -579,3 +580,9 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
bool os::platform_print_native_stack(outputStream* st, void* context, char *buf, int buf_size) {
AixNativeCallstack::print_callstack_for_context(st, (const ucontext_t*)context, true, buf, (size_t) buf_size);
return true;
}

View File

@ -32,4 +32,8 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#define PLATFORM_PRINT_NATIVE_STACK 1
static bool platform_print_native_stack(outputStream* st, void* context,
char *buf, int buf_size);
#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP

View File

@ -535,7 +535,7 @@ static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
}
void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
GrowableArray<oop> oops;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -552,12 +552,11 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// tracing
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
log_info(exceptions)("Exception <%s> (" INTPTR_FORMAT
") thrown in compiled method <%s> at PC " INTPTR_FORMAT
" for thread " INTPTR_FORMAT,
exception->print_value_string(),
p2i((address)exception()),
nm->method()->print_value_string(), p2i(pc), p2i(thread));
stringStream tempst;
tempst.print("compiled method <%s>\n"
" at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
nm->method()->print_value_string(), p2i(pc), p2i(thread));
Exceptions::log_exception(exception, tempst);
}
// for AbortVMOnException flag
Exceptions::debug_check_abort(exception);

View File

@ -971,7 +971,7 @@ void ciEnv::register_method(ciMethod* target,
// and invalidating our dependencies until we install this method.
// No safepoints are allowed. Otherwise, class redefinition can occur in between.
MutexLocker ml(Compile_lock);
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
// Change in Jvmti state may invalidate compilation.
if (!failing() && jvmti_state_changed()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class ciInstance : public ciObject {
protected:
ciInstance(instanceHandle h_i) : ciObject(h_i) {
assert(h_i()->is_instance(), "wrong type");
assert(h_i()->is_instance_noinline(), "wrong type");
}
ciInstance(ciKlass* klass) : ciObject(klass) {}

View File

@ -300,4 +300,8 @@ void AltHashing::test_alt_hash() {
testMurmur3_32_ByteArray();
testEquivalentHashes();
}
void AltHashing_test() {
AltHashing::test_alt_hash();
}
#endif // PRODUCT

View File

@ -863,7 +863,7 @@ void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
initialize_hashtable(interface_names);
bool dup = false;
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
for (index = 0; index < itfs_len; index++) {
const Klass* const k = _local_interfaces->at(index);
const Symbol* const name = InstanceKlass::cast(k)->name();
@ -1620,7 +1620,7 @@ void ClassFileParser::parse_fields(const ClassFileStream* const cfs,
initialize_hashtable(names_and_sigs);
bool dup = false;
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
const Symbol* const name = fs.name();
const Symbol* const sig = fs.signature();
@ -2885,7 +2885,7 @@ void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
initialize_hashtable(names_and_sigs);
bool dup = false;
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
for (int i = 0; i < length; i++) {
const Method* const m = _methods->at(i);
// If no duplicates, add name/signature in hashtable names_and_sigs.
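The three hunks above share one pattern: collect entries (interface names, field name/signature pairs, method name/signature pairs) into a temporary hashtable and set dup when an entry is already present, all inside a debug-only no-safepoint scope. A rough stand-alone version of that duplicate check, using std::unordered_set in place of the VM's hashtable and std::string in place of Symbol*:

#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

// Sketch only: returns true if any (name, signature) pair occurs twice.
static bool has_duplicate(const std::vector<std::pair<std::string, std::string>>& members) {
  std::unordered_set<std::string> seen;
  for (const std::pair<std::string, std::string>& m : members) {
    // insert() reports whether this name/signature pair was already present
    if (!seen.insert(m.first + '\0' + m.second).second) {
      return true;
    }
  }
  return false;
}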

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,7 @@
#include "memory/universe.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvm_misc.hpp"

View File

@ -574,9 +574,9 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
// actual ClassLoaderData object.
ClassLoaderData::Dependencies dependencies(CHECK_NULL);
No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains unhandled oops
NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains unhandled oops
ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -275,9 +275,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// Used to make sure that this CLD is not unloaded.
void set_keep_alive(bool value) { _keep_alive = value; }
unsigned int identity_hash() const {
return _class_loader == NULL ? 0 : _class_loader->identity_hash();
}
inline unsigned int identity_hash() const;
// Used when tracing from klasses.
void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,11 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
unsigned int ClassLoaderData::identity_hash() const {
return _class_loader == NULL ? 0 : _class_loader->identity_hash();
}
inline ClassLoaderData* ClassLoaderData::class_loader_data_or_null(oop loader) {
if (loader == NULL) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
@ -500,6 +501,15 @@ void Dictionary::reorder_dictionary() {
}
}
unsigned int ProtectionDomainCacheTable::compute_hash(oop protection_domain) {
return (unsigned int)(protection_domain->identity_hash());
}
int ProtectionDomainCacheTable::index_for(oop protection_domain) {
return hash_to_index(compute_hash(protection_domain));
}
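index_for() above is the usual hash-to-bucket mapping: take the protection domain's identity hash and reduce it to a table index via hash_to_index() from the Hashtable base class. In isolation, and assuming the reduction is a plain modulo of the table size:

// Sketch only: assumes hash_to_index() reduces the hash modulo the table size.
static int bucket_index(unsigned int hash, int table_size) {
  return static_cast<int>(hash % static_cast<unsigned int>(table_size));
}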
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,14 +196,9 @@ private:
return entry;
}
static unsigned int compute_hash(oop protection_domain) {
return (unsigned int)(protection_domain->identity_hash());
}
int index_for(oop protection_domain) {
return hash_to_index(compute_hash(protection_domain));
}
static unsigned int compute_hash(oop protection_domain);
int index_for(oop protection_domain);
ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "classfile/vmSymbols.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -1524,7 +1524,7 @@ class BacktraceBuilder: public StackObj {
objArrayOop _mirrors;
typeArrayOop _cprefs; // needed to insulate method name against redefinition
int _index;
No_Safepoint_Verifier _nsv;
NoSafepointVerifier _nsv;
public:
@ -1583,7 +1583,7 @@ class BacktraceBuilder: public StackObj {
void expand(TRAPS) {
objArrayHandle old_head(THREAD, _head);
Pause_No_Safepoint_Verifier pnsv(&_nsv);
PauseNoSafepointVerifier pnsv(&_nsv);
objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
objArrayHandle new_head(THREAD, head);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,12 +60,7 @@ class java_lang_String : AllStatic {
static Handle basic_create(int length, bool byte_arr, TRAPS);
static void set_coder(oop string, jbyte coder) {
assert(initialized, "Must be initialized");
if (coder_offset > 0) {
string->byte_field_put(coder_offset, coder);
}
}
static inline void set_coder(oop string, jbyte coder);
public:
@ -110,55 +105,15 @@ class java_lang_String : AllStatic {
return coder_offset;
}
static void set_value_raw(oop string, typeArrayOop buffer) {
assert(initialized, "Must be initialized");
string->obj_field_put_raw(value_offset, buffer);
}
static void set_value(oop string, typeArrayOop buffer) {
assert(initialized && (value_offset > 0), "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
}
static void set_hash(oop string, unsigned int hash) {
assert(initialized && (hash_offset > 0), "Must be initialized");
string->int_field_put(hash_offset, hash);
}
static inline void set_value_raw(oop string, typeArrayOop buffer);
static inline void set_value(oop string, typeArrayOop buffer);
static inline void set_hash(oop string, unsigned int hash);
// Accessors
static typeArrayOop value(oop java_string) {
assert(initialized && (value_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return (typeArrayOop) java_string->obj_field(value_offset);
}
static unsigned int hash(oop java_string) {
assert(initialized && (hash_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return java_string->int_field(hash_offset);
}
static bool is_latin1(oop java_string) {
assert(initialized, "Must be initialized");
assert(is_instance(java_string), "must be java_string");
if (coder_offset > 0) {
jbyte coder = java_string->byte_field(coder_offset);
assert(CompactStrings || coder == CODER_UTF16, "Must be UTF16 without CompactStrings");
return coder == CODER_LATIN1;
} else {
return false;
}
}
static int length(oop java_string) {
assert(initialized, "Must be initialized");
assert(is_instance(java_string), "must be java_string");
typeArrayOop value_array = ((typeArrayOop)java_string->obj_field(value_offset));
if (value_array == NULL) {
return 0;
}
int arr_length = value_array->length();
if (!is_latin1(java_string)) {
assert((arr_length & 1) == 0, "should be even for UTF16 string");
arr_length >>= 1; // convert number of bytes to number of elements
}
return arr_length;
}
static inline typeArrayOop value(oop java_string);
static inline unsigned int hash(oop java_string);
static inline bool is_latin1(oop java_string);
static inline int length(oop java_string);
static int utf8_length(oop java_string);
// String converters
@ -219,7 +174,7 @@ class java_lang_String : AllStatic {
// Testers
static bool is_instance(oop obj);
static bool is_instance_inlined(oop obj);
static inline bool is_instance_inlined(oop obj);
// Debugging
static void print(oop java_string, outputStream* st);
@ -910,42 +865,19 @@ class java_lang_ref_Reference: AllStatic {
static int number_of_fake_oop_fields;
// Accessors
static oop referent(oop ref) {
return ref->obj_field(referent_offset);
}
static void set_referent(oop ref, oop value) {
ref->obj_field_put(referent_offset, value);
}
static void set_referent_raw(oop ref, oop value) {
ref->obj_field_put_raw(referent_offset, value);
}
static HeapWord* referent_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(referent_offset);
}
static oop next(oop ref) {
return ref->obj_field(next_offset);
}
static void set_next(oop ref, oop value) {
ref->obj_field_put(next_offset, value);
}
static void set_next_raw(oop ref, oop value) {
ref->obj_field_put_raw(next_offset, value);
}
static HeapWord* next_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(next_offset);
}
static oop discovered(oop ref) {
return ref->obj_field(discovered_offset);
}
static void set_discovered(oop ref, oop value) {
ref->obj_field_put(discovered_offset, value);
}
static void set_discovered_raw(oop ref, oop value) {
ref->obj_field_put_raw(discovered_offset, value);
}
static HeapWord* discovered_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(discovered_offset);
}
static inline oop referent(oop ref);
static inline void set_referent(oop ref, oop value);
static inline void set_referent_raw(oop ref, oop value);
static inline HeapWord* referent_addr(oop ref);
static inline oop next(oop ref);
static inline void set_next(oop ref, oop value);
static inline void set_next_raw(oop ref, oop value);
static inline HeapWord* next_addr(oop ref);
static inline oop discovered(oop ref);
static inline void set_discovered(oop ref, oop value);
static inline void set_discovered_raw(oop ref, oop value);
static inline HeapWord* discovered_addr(oop ref);
// Accessors for statics
static oop pending_list_lock();
static oop pending_list();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,105 @@
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
void java_lang_String::set_coder(oop string, jbyte coder) {
assert(initialized, "Must be initialized");
if (coder_offset > 0) {
string->byte_field_put(coder_offset, coder);
}
}
void java_lang_String::set_value_raw(oop string, typeArrayOop buffer) {
assert(initialized, "Must be initialized");
string->obj_field_put_raw(value_offset, buffer);
}
void java_lang_String::set_value(oop string, typeArrayOop buffer) {
assert(initialized && (value_offset > 0), "Must be initialized");
string->obj_field_put(value_offset, (oop)buffer);
}
void java_lang_String::set_hash(oop string, unsigned int hash) {
assert(initialized && (hash_offset > 0), "Must be initialized");
string->int_field_put(hash_offset, hash);
}
// Accessors
typeArrayOop java_lang_String::value(oop java_string) {
assert(initialized && (value_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return (typeArrayOop) java_string->obj_field(value_offset);
}
unsigned int java_lang_String::hash(oop java_string) {
assert(initialized && (hash_offset > 0), "Must be initialized");
assert(is_instance(java_string), "must be java_string");
return java_string->int_field(hash_offset);
}
bool java_lang_String::is_latin1(oop java_string) {
assert(initialized, "Must be initialized");
assert(is_instance(java_string), "must be java_string");
if (coder_offset > 0) {
jbyte coder = java_string->byte_field(coder_offset);
assert(CompactStrings || coder == CODER_UTF16, "Must be UTF16 without CompactStrings");
return coder == CODER_LATIN1;
} else {
return false;
}
}
int java_lang_String::length(oop java_string) {
assert(initialized, "Must be initialized");
assert(is_instance(java_string), "must be java_string");
typeArrayOop value_array = ((typeArrayOop)java_string->obj_field(value_offset));
if (value_array == NULL) {
return 0;
}
int arr_length = value_array->length();
if (!is_latin1(java_string)) {
assert((arr_length & 1) == 0, "should be even for UTF16 string");
arr_length >>= 1; // convert number of bytes to number of elements
}
return arr_length;
}
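is_latin1() and length() above encode the Compact Strings layout: value is a byte array, and the coder field says whether each character occupies one byte (LATIN1) or two (UTF16), so the character count is the byte count, halved for UTF16. The same arithmetic in isolation, with the coder values assumed to match the constants used above:

#include <cstdint>
#include <vector>

enum Coder : int8_t { CODER_LATIN1 = 0, CODER_UTF16 = 1 };  // assumed values

// Character count of a byte-backed string, mirroring length() above.
static int char_length(const std::vector<int8_t>& value, Coder coder) {
  int byte_count = static_cast<int>(value.size());
  return (coder == CODER_LATIN1) ? byte_count : byte_count / 2;  // UTF16: two bytes per char
}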
bool java_lang_String::is_instance_inlined(oop obj) {
return obj != NULL && obj->klass() == SystemDictionary::String_klass();
}
// Accessors
oop java_lang_ref_Reference::referent(oop ref) {
return ref->obj_field(referent_offset);
}
void java_lang_ref_Reference::set_referent(oop ref, oop value) {
ref->obj_field_put(referent_offset, value);
}
void java_lang_ref_Reference::set_referent_raw(oop ref, oop value) {
ref->obj_field_put_raw(referent_offset, value);
}
HeapWord* java_lang_ref_Reference::referent_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(referent_offset);
}
oop java_lang_ref_Reference::next(oop ref) {
return ref->obj_field(next_offset);
}
void java_lang_ref_Reference::set_next(oop ref, oop value) {
ref->obj_field_put(next_offset, value);
}
void java_lang_ref_Reference::set_next_raw(oop ref, oop value) {
ref->obj_field_put_raw(next_offset, value);
}
HeapWord* java_lang_ref_Reference::next_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(next_offset);
}
oop java_lang_ref_Reference::discovered(oop ref) {
return ref->obj_field(discovered_offset);
}
void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
ref->obj_field_put(discovered_offset, value);
}
void java_lang_ref_Reference::set_discovered_raw(oop ref, oop value) {
ref->obj_field_put_raw(discovered_offset, value);
}
HeapWord* java_lang_ref_Reference::discovered_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(discovered_offset);
}
inline void java_lang_invoke_CallSite::set_target_volatile(oop site, oop target) {
site->obj_field_put_volatile(_target_offset, target);
}
@ -41,10 +140,6 @@ inline void java_lang_invoke_CallSite::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}
inline bool java_lang_String::is_instance_inlined(oop obj) {
return obj != NULL && obj->klass() == SystemDictionary::String_klass();
}
inline bool java_lang_invoke_CallSite::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
@ -73,6 +168,9 @@ inline bool java_lang_invoke_DirectMethodHandle::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
inline int Backtrace::merge_bci_and_version(int bci, int version) {
// only store u2 for version, checking for overflow.
if (version > USHRT_MAX || version < 0) version = USHRT_MAX;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/compactHashtable.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
@ -136,7 +136,7 @@ oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
assert(java_lang_String::equals(string(), name, len),
"string must be properly initialized");
// Cannot hit a safepoint in this function because the "this" pointer can move.
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
// Check if the symbol table has been rehashed, if so, need to recalculate
// the hash value and index before second lookup.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -264,7 +264,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
unsigned int hashValue;
char* name;
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
name = (char*)sym->base() + begin;
len = end - begin;
@ -288,7 +288,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
buffer[i] = name[i];
}
// Make sure there is no safepoint in the code above since name can't move.
// We can't include the code in No_Safepoint_Verifier because of the
// We can't include the code in NoSafepointVerifier because of the
// ResourceMark.
// Grab SymbolTable_lock first.
@ -405,7 +405,7 @@ Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
}
// Cannot hit a safepoint in this function because the "this" pointer can move.
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
// Check if the symbol table has been rehashed, if so, need to recalculate
// the hash value and index.
@ -454,7 +454,7 @@ bool SymbolTable::basic_add(ClassLoaderData* loader_data, const constantPoolHand
}
// Cannot hit a safepoint in this function because the "this" pointer can move.
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
for (int i=0; i<names_count; i++) {
// Check if the symbol table has been rehashed, if so, need to recalculate
@ -667,3 +667,53 @@ int SymboltableDCmd::num_arguments() {
return 0;
}
}
#ifndef PRODUCT
// Internal test of TempNewSymbol
void Test_TempNewSymbol() {
// Assert messages assume these symbols are unique, and the refcounts start at
// one, but code does not rely on this.
Thread* THREAD = Thread::current();
Symbol* abc = SymbolTable::new_symbol("abc", CATCH);
int abccount = abc->refcount();
TempNewSymbol ss = abc;
assert(ss->refcount() == abccount, "only one abc");
assert(ss->refcount() == abc->refcount(), "should match TempNewSymbol");
Symbol* efg = SymbolTable::new_symbol("efg", CATCH);
Symbol* hij = SymbolTable::new_symbol("hij", CATCH);
int efgcount = efg->refcount();
int hijcount = hij->refcount();
TempNewSymbol s1 = efg;
TempNewSymbol s2 = hij;
assert(s1->refcount() == efgcount, "one efg");
assert(s2->refcount() == hijcount, "one hij");
// Assignment operator
s1 = s2;
assert(hij->refcount() == hijcount + 1, "should be two hij");
assert(efg->refcount() == efgcount - 1, "should be no efg");
s1 = ss; // s1 is abc
assert(s1->refcount() == abccount + 1, "should be two abc (s1 and ss)");
assert(hij->refcount() == hijcount, "should only have one hij now (s2)");
s1 = s1; // self assignment
assert(s1->refcount() == abccount + 1, "should still be two abc (s1 and ss)");
TempNewSymbol s3;
Symbol* klm = SymbolTable::new_symbol("klm", CATCH);
int klmcount = klm->refcount();
s3 = klm; // assignment
assert(s3->refcount() == klmcount, "only one klm now");
Symbol* xyz = SymbolTable::new_symbol("xyz", CATCH);
int xyzcount = xyz->refcount();
{ // inner scope
TempNewSymbol s_inner = xyz;
}
assert(xyz->refcount() == (xyzcount - 1),
"Should have been decremented by dtor in inner scope");
}
#endif // PRODUCT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,8 +42,17 @@
class BoolObjectClosure;
class outputStream;
// Class to hold a newly created or referenced Symbol* temporarily in scope.
// new_symbol() and lookup() will create a Symbol* if not already in the
// TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*).
// The body is resource managed by a reference counting scheme.
// TempNewSymbol can therefore be used to properly hold a newly created or referenced
// Symbol* temporarily in scope.
//
// Routines in SymbolTable will initialize the reference count of a Symbol* before
// it becomes "managed" by TempNewSymbol instances. As a handle class, TempNewSymbol
// needs to maintain proper reference counting in context of copy semantics.
//
// In SymbolTable, new_symbol() and lookup() will create a Symbol* if not already in the
// symbol table and add to the symbol's reference count.
// probe() and lookup_only() will increment the refcount if symbol is found.
class TempNewSymbol : public StackObj {
@ -51,25 +60,38 @@ class TempNewSymbol : public StackObj {
public:
TempNewSymbol() : _temp(NULL) {}
// Creating or looking up a symbol increments the symbol's reference count
// Conversion from a Symbol* to a TempNewSymbol.
// Does not increment the current reference count.
TempNewSymbol(Symbol *s) : _temp(s) {}
// Operator= increments reference count.
void operator=(const TempNewSymbol &s) {
//clear(); //FIXME
_temp = s._temp;
if (_temp !=NULL) _temp->increment_refcount();
// Copy constructor increments reference count.
TempNewSymbol(const TempNewSymbol& rhs) : _temp(rhs._temp) {
if (_temp != NULL) {
_temp->increment_refcount();
}
}
// Decrement reference counter so it can go away if it's unique
void clear() { if (_temp != NULL) _temp->decrement_refcount(); _temp = NULL; }
// Assignment operator uses a c++ trick called copy and swap idiom.
// rhs is passed by value so within the scope of this method it is a copy.
// At method exit it contains the former value of _temp, triggering the correct refcount
// decrement upon destruction.
void operator=(TempNewSymbol rhs) {
Symbol* tmp = rhs._temp;
rhs._temp = _temp;
_temp = tmp;
}
~TempNewSymbol() { clear(); }
// Decrement reference counter so it can go away if it's unused
~TempNewSymbol() {
if (_temp != NULL) {
_temp->decrement_refcount();
}
}
// Operators so they can be used like Symbols
// Symbol* conversion operators
Symbol* operator -> () const { return _temp; }
bool operator == (Symbol* o) const { return _temp == o; }
// Sneaky conversion function
operator Symbol*() { return _temp; }
};
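The assignment operator above is the classic copy-and-swap idiom: taking the right-hand side by value produces a copy (whose copy constructor bumps the refcount), the bodies are swapped, and the by-value parameter's destructor releases the previous body on the way out. A minimal stand-alone handle with the same shape, using a hypothetical RefCounted body in place of Symbol:

// Sketch of copy-and-swap for a refcounted handle; RefCounted is hypothetical.
struct RefCounted {
  int refcount = 1;
  void inc() { ++refcount; }
  void dec() { --refcount; }   // real code would free the body at zero
};

class Handle {
  RefCounted* _body;
 public:
  Handle() : _body(nullptr) {}
  Handle(RefCounted* b) : _body(b) {}                 // adopts, no refcount bump
  Handle(const Handle& rhs) : _body(rhs._body) {      // copying bumps the count
    if (_body != nullptr) _body->inc();
  }
  void operator=(Handle rhs) {                        // by value: rhs is already a copy
    RefCounted* tmp = rhs._body;                      // swap bodies; rhs's destructor
    rhs._body = _body;                                // releases the previous one
    _body = tmp;
  }
  ~Handle() { if (_body != nullptr) _body->dec(); }
};

Self-assignment also falls out correctly, since the by-value copy bumps the count before the old body is released; that is the case the s1 = s1 line in the symbolTable.cpp test above exercises.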

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -475,11 +475,11 @@ void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
// We're using a No_Safepoint_Verifier to catch any place where we
// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
NoSafepointVerifier nosafepoint;
dictionary()->add_protection_domain(d_index, d_hash, klass, loader_data,
protection_domain, THREAD);
}
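NoSafepointVerifier (the new spelling of No_Safepoint_Verifier used throughout this change) is a debug-time RAII guard: while an instance is live, code that could reach a safepoint asserts, which is what lets the blocks above hold a dictionary entry without it being unloaded underneath them. A rough sketch of that guard pattern, not the VM's actual implementation:

#include <cassert>

// Sketch only: a scoped guard that forbids an operation while it is alive.
class NoSafepointGuard {
  static int _depth;                 // > 0 means safepoints are forbidden
 public:
  NoSafepointGuard()  { ++_depth; }
  ~NoSafepointGuard() { --_depth; }
  // Called from places that might block for a safepoint.
  static void check_at_possible_safepoint() {
    assert(_depth == 0 && "reached a possible safepoint inside a no-safepoint scope");
  }
};
int NoSafepointGuard::_depth = 0;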
@ -908,11 +908,11 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
MutexLocker mu(SystemDictionary_lock, THREAD);
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
// We're using a No_Safepoint_Verifier to catch any place where we
// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
NoSafepointVerifier nosafepoint;
if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
loader_data,
protection_domain)) {
@ -961,11 +961,11 @@ Klass* SystemDictionary::find(Symbol* class_name,
{
// Note that we have an entry, and entries can be deleted only during GC,
// so we cannot allow GC to occur while we're holding this entry.
// We're using a No_Safepoint_Verifier to catch any place where we
// We're using a NoSafepointVerifier to catch any place where we
// might potentially do a GC at all.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
NoSafepointVerifier nosafepoint;
return dictionary()->find(d_index, d_hash, class_name, loader_data,
protection_domain, THREAD);
}
@ -1974,12 +1974,11 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
InstanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
InstanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Cleaner_klass), scan, CHECK);
initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
InstanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
InstanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
InstanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
InstanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
InstanceKlass::cast(WK_KLASS(Cleaner_klass))->set_reference_type(REF_CLEANER);
// JSR 292 classes
WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
@ -2211,7 +2210,7 @@ bool SystemDictionary::add_loader_constraint(Symbol* class_name,
MutexLocker mu_s(SystemDictionary_lock, THREAD);
// Better never do a GC while we're holding these oops
No_Safepoint_Verifier nosafepoint;
NoSafepointVerifier nosafepoint;
Klass* klass1 = find_class(d_index1, d_hash1, constraint_name, loader_data1);
Klass* klass2 = find_class(d_index2, d_hash2, constraint_name, loader_data2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -128,7 +128,6 @@ class SymbolPropertyTable;
do_klass(WeakReference_klass, java_lang_ref_WeakReference, Pre ) \
do_klass(FinalReference_klass, java_lang_ref_FinalReference, Pre ) \
do_klass(PhantomReference_klass, java_lang_ref_PhantomReference, Pre ) \
do_klass(Cleaner_klass, sun_misc_Cleaner, Pre ) \
do_klass(Finalizer_klass, java_lang_ref_Finalizer, Pre ) \
\
do_klass(Thread_klass, java_lang_Thread, Pre ) \

View File

@ -2004,7 +2004,7 @@ bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
Symbol* field_name,
Symbol* field_sig,
bool is_method) {
No_Safepoint_Verifier nosafepoint;
NoSafepointVerifier nosafepoint;
// If target class isn't a super class of this class, we don't worry about this case
if (!this_class->is_subclass_of(target_class)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,7 +82,6 @@
template(java_lang_ref_WeakReference, "java/lang/ref/WeakReference") \
template(java_lang_ref_FinalReference, "java/lang/ref/FinalReference") \
template(java_lang_ref_PhantomReference, "java/lang/ref/PhantomReference") \
template(sun_misc_Cleaner, "sun/misc/Cleaner") \
template(java_lang_ref_Finalizer, "java/lang/ref/Finalizer") \
template(java_lang_reflect_AccessibleObject, "java/lang/reflect/AccessibleObject") \
template(java_lang_reflect_Method, "java/lang/reflect/Method") \

View File

@ -1034,7 +1034,7 @@ int CodeCache::mark_for_deoptimization(DepChange& changes) {
// implementor.
// nmethod::check_all_dependencies works only correctly, if no safepoint
// can happen
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
Klass* d = str.klass();
number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1949,3 +1949,10 @@ void Dependencies::print_statistics() {
}
}
#endif
CallSiteDepChange::CallSiteDepChange(Handle call_site, Handle method_handle) :
_call_site(call_site),
_method_handle(method_handle) {
assert(_call_site()->is_a(SystemDictionary::CallSite_klass()), "must be");
assert(_method_handle.is_null() || _method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@ class CompileLog;
class DepChange;
class KlassDepChange;
class CallSiteDepChange;
class No_Safepoint_Verifier;
class NoSafepointVerifier;
class Dependencies: public ResourceObj {
public:
@ -713,7 +713,7 @@ class DepChange : public StackObj {
: _changes(changes)
{ start(); }
ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
ContextStream(DepChange& changes, NoSafepointVerifier& nsv)
: _changes(changes)
// the nsv argument makes it safe to hold oops like _klass
{ start(); }
@ -767,13 +767,7 @@ class CallSiteDepChange : public DepChange {
Handle _method_handle;
public:
CallSiteDepChange(Handle call_site, Handle method_handle)
: _call_site(call_site),
_method_handle(method_handle)
{
assert(_call_site() ->is_a(SystemDictionary::CallSite_klass()), "must be");
assert(_method_handle.is_null() || _method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
}
CallSiteDepChange(Handle call_site, Handle method_handle);
// What kind of DepChange is this?
virtual bool is_call_site_change() const { return true; }

View File

@ -692,7 +692,7 @@ nmethod::nmethod(
_native_basic_lock_sp_offset(basic_lock_sp_offset)
{
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults();
@ -796,7 +796,7 @@ nmethod::nmethod(
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
debug_only(No_Safepoint_Verifier nsv;)
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults();
@ -1412,7 +1412,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this);
methodHandle the_method(method());
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
// during patching, depending on the nmethod state we must notify the GC that
// code has been unloaded, unregistering it. We cannot do this right while

View File

@ -375,7 +375,7 @@ CompileTask* CompileQueue::get() {
CompileTask* task;
{
No_Safepoint_Verifier nsv;
NoSafepointVerifier nsv;
task = CompilationPolicy::policy()->select_task(this);
}

View File

@ -608,7 +608,7 @@ void DirectivesParser::test(const char* text, bool should_pass) {
cd.clean_tmp();
}
bool DirectivesParser::test() {
void DirectivesParser::test() {
DirectivesParser::test("{}", false);
DirectivesParser::test("[]", true);
DirectivesParser::test("[{}]", false);
@ -742,8 +742,10 @@ bool DirectivesParser::test() {
" }" "\n"
" }" "\n"
"]" "\n", false);
}
return true;
void DirectivesParser_test() {
DirectivesParser::test();
}
#endif

View File

@ -136,7 +136,7 @@ private:
#ifndef PRODUCT
static void test(const char* json, bool valid);
public:
static bool test();
static void test();
#endif
};

View File

@ -37,7 +37,7 @@ class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
class ParMarkFromRootsClosure;
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
@ -82,14 +82,14 @@ class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
virtual void do_oop(narrowOop* p);
};
class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
private:
const MemRegion _span;
CMSBitMap* _bitMap;
protected:
DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};
@ -141,7 +141,7 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
class ParPushAndMarkClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
MemRegion _span;
@ -150,15 +150,15 @@ class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
protected:
DO_OOP_WORK_DEFN
public:
Par_PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
};
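As the comment above notes, the parallel closures trade the single mark stack for per-worker OopTaskQueues so idle workers can steal work. A generic sketch of the drain-then-steal loop those queues enable; the queue types are stand-ins and all synchronization is omitted, so this only illustrates the control flow:

#include <deque>
#include <vector>

// Sketch only: Task and the queues are illustrative, not the VM's task queue API.
struct Task { /* e.g. an object whose fields still need scanning */ };

using TaskQueue = std::deque<Task>;

static bool steal(std::vector<TaskQueue>& queues, size_t self, Task& out) {
  for (size_t i = 0; i < queues.size(); i++) {
    if (i != self && !queues[i].empty()) {   // a real implementation picks victims at random
      out = queues[i].front();               // and steals from the opposite end of the deque
      queues[i].pop_front();
      return true;
    }
  }
  return false;
}

static void mark_worker(std::vector<TaskQueue>& queues, size_t self) {
  Task t;
  for (;;) {
    while (!queues[self].empty()) {          // drain the local queue first
      t = queues[self].back();
      queues[self].pop_back();
      // ...scan t, pushing newly discovered work onto queues[self]...
    }
    if (!steal(queues, self, t)) break;      // no local work left: try to steal
    queues[self].push_back(t);               // stolen work found; loop back and drain
  }
}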
// The non-parallel version (the parallel version appears further below).
@ -203,25 +203,25 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
private:
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
const uint _low_water_mark;
Par_PushAndMarkClosure _par_pushAndMarkClosure;
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
const uint _low_water_mark;
ParPushAndMarkClosure _parPushAndMarkClosure;
protected:
DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
void trim_queue(uint size);
};
@ -261,8 +261,8 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
// the closure ParMarkFromRootsClosure.
class ParPushOrMarkClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
MemRegion _whole_span;
@ -272,23 +272,23 @@ class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
CMSMarkStack* _overflow_stack;
HeapWord* const _finger;
HeapWord** const _global_finger_addr;
Par_MarkFromRootsClosure* const
ParMarkFromRootsClosure* const
_parent;
protected:
DO_OOP_WORK_DEFN
public:
Par_PushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* mark_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent);
ParPushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* mark_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);

View File

@ -31,7 +31,7 @@
#include "oops/oop.inline.hpp"
// Trim our work_queue so its length is below max at return
inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
@ -40,7 +40,7 @@ inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_par_pushAndMarkClosure);
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}

View File

@ -576,7 +576,7 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
}
}
class FreeListSpace_DCTOC : public Filtering_DCTOC {
class FreeListSpaceDCTOC : public FilteringDCTOC {
CompactibleFreeListSpace* _cfls;
CMSCollector* _collector;
bool _parallel;
@ -596,21 +596,21 @@ protected:
walk_mem_region_with_cl_DECL(FilteringClosure);
public:
FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
Filtering_DCTOC(sp, cl, precision, boundary),
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
FilteringDCTOC(sp, cl, precision, boundary),
_cfls(sp), _collector(collector), _parallel(parallel) {}
};
// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
@ -620,10 +620,10 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
} \
} \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
/* Skip parts that are before "mr", in case "block_start" sent us \
back too far. */ \
HeapWord* mr_start = mr.start(); \
@ -647,10 +647,10 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,
} \
} \
} \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
/* Skip parts that are before "mr", in case "block_start" sent us \
back too far. */ \
HeapWord* mr_start = mr.start(); \
@ -678,15 +678,15 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
}
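The FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN macro above stamps out a separate non-virtual walk body per closure type, so the per-card walk calls CompactibleFreeListSpace's block methods directly instead of going through virtual dispatch. The same trick in miniature, with made-up types:

// Sketch only: generate one statically bound overload per closure type with a macro.
struct ClosureA { void do_item(int) { /* process an item one way */ } };
struct ClosureB { void do_item(int) { /* process an item another way */ } };

class Walker {
 public:
#define WALK_DEFN(ClosureType)                                    \
  void walk(const int* begin, const int* end, ClosureType* cl) { \
    for (const int* p = begin; p < end; p++) {                   \
      cl->do_item(*p);   /* direct, non-virtual call */          \
    }                                                             \
  }
  WALK_DEFN(ClosureA)
  WALK_DEFN(ClosureB)
#undef WALK_DEFN
};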
@ -2413,7 +2413,7 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
}
///////////////////////////////////////////////////////////////////////////
// CFLS_LAB
// CompactibleFreeListSpaceLAB
///////////////////////////////////////////////////////////////////////////
#define VECTOR_257(x) \
@ -2432,12 +2432,12 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
// generic OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[] =
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
size_t CompactibleFreeListSpaceLAB::_global_num_blocks[] = VECTOR_257(0);
uint CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
_cfls(cfls)
{
assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
@ -2451,7 +2451,7 @@ CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
static bool _CFLS_LAB_modified = false;
void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
assert(!_CFLS_LAB_modified, "Call only once");
_CFLS_LAB_modified = true;
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
@ -2461,7 +2461,7 @@ void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
}
}
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
FreeChunk* res;
assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
@ -2491,7 +2491,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
// Get the #blocks we want to claim
size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
assert(n_blks > 0, "Error");
@ -2525,7 +2525,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
_num_blocks[word_sz] += fl->count();
}
void CFLS_LAB::compute_desired_plab_size() {
void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
@ -2551,7 +2551,7 @@ void CFLS_LAB::compute_desired_plab_size() {
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
void CFLS_LAB::retire(int tid) {
void CompactibleFreeListSpaceLAB::retire(int tid) {
// We run this single threaded with the world stopped;
// so no need for locks and such.
NOT_PRODUCT(Thread* t = Thread::current();)

View File

@ -75,7 +75,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
friend class ConcurrentMarkSweepGeneration;
friend class CMSCollector;
// Local alloc buffer for promotion into this space.
friend class CFLS_LAB;
friend class CompactibleFreeListSpaceLAB;
// Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
template <typename SpaceType>
friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
@ -662,7 +662,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
// The space that this buffer allocates into.
CompactibleFreeListSpace* _cfls;
@ -686,7 +686,7 @@ public:
static const int _default_dynamic_old_plab_size = 16;
static const int _default_static_old_plab_size = 50;
CFLS_LAB(CompactibleFreeListSpace* cfls);
CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
// Allocate and return a block of the given size, or else return NULL.
HeapWord* alloc(size_t word_sz);

View File

@ -183,7 +183,7 @@ NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
public:
CFLS_LAB lab;
CompactibleFreeListSpaceLAB lab;
PromotionInfo promo;
// Constructor.
@ -1110,7 +1110,7 @@ bool ConcurrentMarkSweepGeneration::should_collect(bool full,
bool CMSCollector::shouldConcurrentCollect() {
if (_full_gc_requested) {
log_trace(gc)("CMSCollector: collect because of explicit gc request (or gc_locker)");
log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
return true;
}
@ -1269,12 +1269,12 @@ void CMSCollector::collect(bool full,
{
// The following "if" branch is present for defensive reasons.
// In the current uses of this interface, it can be replaced with:
// assert(!GC_locker.is_active(), "Can't be called otherwise");
// assert(!GCLocker.is_active(), "Can't be called otherwise");
// But I am not placing that assert here to allow future
// generality in invoking this interface.
if (GC_locker::is_active()) {
// A consistency test for GC_locker
assert(GC_locker::needs_gc(), "Should have been set already");
if (GCLocker::is_active()) {
// A consistency test for GCLocker
assert(GCLocker::needs_gc(), "Should have been set already");
// Skip this foreground collection, instead
// expanding the heap if necessary.
// Need the free list locks for the call to free() in compute_new_size()
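The defensive branch above is the usual GCLocker interaction: if a JNI critical section currently holds the locker, this foreground collection is skipped (expanding the heap instead if necessary), and needs_gc() records that a collection was requested while the locker was held. Compressed into a stand-alone sketch, with a stub in place of the real GCLocker:

// Sketch only: GCLockerStub stands in for GCLocker; the policy details are assumptions.
struct GCLockerStub {
  static bool is_active() { return false; }  // true while a JNI critical section holds it
  static bool needs_gc()  { return false; }  // true if a GC was requested while held
};

static void collect_or_defer() {
  if (GCLockerStub::is_active()) {
    // Skip this foreground collection; grow the heap instead if necessary.
    // The deferred collection is expected to run once the locker is released.
    return;
  }
  // ...perform the collection...
}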
@ -3272,10 +3272,10 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
// Do the marking work within a non-empty span --
// the last argument to the constructor indicates whether the
// iteration should be incremental with periodic yields.
Par_MarkFromRootsClosure cl(this, _collector, my_span,
&_collector->_markBitMap,
work_queue(i),
&_collector->_markStack);
ParMarkFromRootsClosure cl(this, _collector, my_span,
&_collector->_markBitMap,
work_queue(i),
&_collector->_markStack);
_collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
} // else nothing to do for this task
} // else nothing to do for this task
@ -3291,7 +3291,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
pst->all_tasks_completed();
}
class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
class ParConcMarkingClosure: public MetadataAwareOopClosure {
private:
CMSCollector* _collector;
CMSConcMarkingTask* _task;
@ -3302,8 +3302,8 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
protected:
DO_OOP_WORK_DEFN
public:
Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
MetadataAwareOopClosure(collector->ref_processor()),
_collector(collector),
_task(task),
@ -3330,7 +3330,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
void Par_ConcMarkingClosure::do_oop(oop obj) {
void ParConcMarkingClosure::do_oop(oop obj) {
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
@ -3366,10 +3366,10 @@ void Par_ConcMarkingClosure::do_oop(oop obj) {
}
}
void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
void ParConcMarkingClosure::do_oop(oop* p) { ParConcMarkingClosure::do_oop_work(p); }
void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
void Par_ConcMarkingClosure::trim_queue(size_t max) {
void ParConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
if (_work_queue->pop_local(new_oop)) {
@ -3385,7 +3385,7 @@ void Par_ConcMarkingClosure::trim_queue(size_t max) {
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(),
@ -3404,7 +3404,7 @@ void CMSConcMarkingTask::do_work_steal(int i) {
CMSBitMap* bm = &(_collector->_markBitMap);
CMSMarkStack* ovflw = &(_collector->_markStack);
int* seed = _collector->hash_seed(i);
Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
while (true) {
cl.trim_queue(0);
assert(work_q->size() == 0, "Should have been emptied above");
@ -4246,7 +4246,7 @@ void CMSParInitialMarkTask::work(uint worker_id) {
// ---------- scan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
// ---------- young gen roots --------------
{
@ -4312,10 +4312,10 @@ class CMSParRemarkTask: public CMSParMarkTask {
private:
// ... of dirty cards in old space
void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
Par_MarkRefsIntoAndScanClosure* cl);
ParMarkRefsIntoAndScanClosure* cl);
// ... work stealing for the above
void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};
class RemarkKlassClosure : public KlassClosure {
@ -4361,7 +4361,7 @@ void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* c
}
// work_queue(i) is passed to the closure
// Par_MarkRefsIntoAndScanClosure. The "i" parameter
// ParMarkRefsIntoAndScanClosure. The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.
@ -4373,7 +4373,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- rescan from roots --------------
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
_collector->_span, _collector->ref_processor(),
&(_collector->_markBitMap),
work_queue(worker_id));
@ -4522,7 +4522,7 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
CompactibleFreeListSpace* sp, int i,
Par_MarkRefsIntoAndScanClosure* cl) {
ParMarkRefsIntoAndScanClosure* cl) {
// Until all tasks completed:
// . claim an unclaimed task
// . compute region boundaries corresponding to task claimed
@ -4614,7 +4614,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// . see if we can share work_queues with ParNew? XXX
void
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
@ -5832,7 +5832,7 @@ void MarkRefsIntoClosure::do_oop(oop obj) {
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
MemRegion span, CMSBitMap* bitMap):
_span(span),
_bitMap(bitMap)
@ -5841,7 +5841,7 @@ Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
void Par_MarkRefsIntoClosure::do_oop(oop obj) {
void ParMarkRefsIntoClosure::do_oop(oop obj) {
// if p points into _span, then mark corresponding bit in _markBitMap
assert(obj->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)obj;
@ -5851,8 +5851,8 @@ void Par_MarkRefsIntoClosure::do_oop(oop obj) {
}
}
void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
void ParMarkRefsIntoClosure::do_oop(oop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
@ -5989,10 +5989,10 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
}
///////////////////////////////////////////////////////////
// Par_MarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
// ParMarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
CMSBitMap* bit_map, OopTaskQueue* work_queue):
_span(span),
@ -6000,7 +6000,7 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
_work_queue(work_queue),
_low_water_mark(MIN2((work_queue->max_elems()/4),
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
_parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
// FIXME: Should initialize in base class constructor.
assert(rp != NULL, "ref_processor shouldn't be NULL");
@ -6014,7 +6014,7 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
if (obj != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
@ -6041,8 +6041,8 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
}
}
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
void ParMarkRefsIntoAndScanClosure::do_oop(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
@ -6426,7 +6426,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
@ -6449,7 +6449,7 @@ Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
// Should revisit to see if this should be restructured for
// greater efficiency.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
bool ParMarkFromRootsClosure::do_bit(size_t offset) {
if (_skip_bits > 0) {
_skip_bits--;
return true;
@ -6474,7 +6474,7 @@ bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
return true;
}
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
assert(_bit_map->isMarked(ptr), "expected bit to be set");
// Should we assert that our work queue is empty or
// below some drain limit?
@ -6524,12 +6524,12 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
// Note: the local finger doesn't advance while we drain
// the stack below, but the global finger sure can and will.
HeapWord** gfa = _task->global_finger_addr();
Par_PushOrMarkClosure pushOrMarkClosure(_collector,
_span, _bit_map,
_work_queue,
_overflow_stack,
_finger,
gfa, this);
ParPushOrMarkClosure pushOrMarkClosure(_collector,
_span, _bit_map,
_work_queue,
_overflow_stack,
_finger,
gfa, this);
bool res = _work_queue->push(obj); // overflow could occur here
assert(res, "Will hold once we use workqueues");
while (true) {
@ -6557,7 +6557,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
void ParMarkFromRootsClosure::do_yield_work() {
assert(_task != NULL, "sanity");
_task->yield();
}
@ -6684,14 +6684,14 @@ PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
_parent(parent)
{ }
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* overflow_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent) :
ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* overflow_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
ParMarkFromRootsClosure* parent) :
MetadataAwareOopClosure(collector->ref_processor()),
_collector(collector),
_whole_span(collector->_span),
@ -6729,7 +6729,7 @@ void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(),
@ -6776,7 +6776,7 @@ void PushOrMarkClosure::do_oop(oop obj) {
void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(oop obj) {
void ParPushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
@ -6822,8 +6822,8 @@ void Par_PushOrMarkClosure::do_oop(oop obj) {
}
}
void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
void ParPushOrMarkClosure::do_oop(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@ -6900,11 +6900,11 @@ void PushAndMarkClosure::do_oop(oop obj) {
}
}
Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue):
ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue):
MetadataAwareOopClosure(rp),
_collector(collector),
_span(span),
@ -6919,7 +6919,7 @@ void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(
// Grey object rescan during second checkpoint phase --
// the parallel version.
void Par_PushAndMarkClosure::do_oop(oop obj) {
void ParPushAndMarkClosure::do_oop(oop obj) {
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
@ -6959,8 +6959,8 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
}
}
void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
void ParPushAndMarkClosure::do_oop(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
void CMSPrecleanRefsYieldClosure::do_yield_work() {
Mutex* bml = _collector->bitMapLock();
View File
@ -510,17 +510,17 @@ class CMSCollector: public CHeapObj<mtGC> {
friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
friend class SurvivorSpacePrecleanClosure; // --- ditto -------
friend class PushOrMarkClosure; // to access _restart_addr
friend class Par_PushOrMarkClosure; // to access _restart_addr
friend class ParPushOrMarkClosure; // to access _restart_addr
friend class MarkFromRootsClosure; // -- ditto --
// ... and for clearing cards
friend class Par_MarkFromRootsClosure; // to access _restart_addr
friend class ParMarkFromRootsClosure; // to access _restart_addr
// ... and for clearing cards
friend class Par_ConcMarkingClosure; // to access _restart_addr etc.
friend class ParConcMarkingClosure; // to access _restart_addr etc.
friend class MarkFromRootsVerifyClosure; // to access _restart_addr
friend class PushAndMarkVerifyClosure; // -- ditto --
friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
friend class PushAndMarkClosure; // -- ditto --
friend class Par_PushAndMarkClosure; // -- ditto --
friend class ParPushAndMarkClosure; // -- ditto --
friend class CMSKeepAliveClosure; // -- ditto --
friend class CMSDrainMarkingStackClosure; // -- ditto --
friend class CMSInnerParMarkAndPushClosure; // -- ditto --
@ -1282,7 +1282,7 @@ class MarkFromRootsClosure: public BitMapClosure {
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
class Par_MarkFromRootsClosure: public BitMapClosure {
class ParMarkFromRootsClosure: public BitMapClosure {
CMSCollector* _collector;
MemRegion _whole_span;
MemRegion _span;
@ -1295,11 +1295,11 @@ class Par_MarkFromRootsClosure: public BitMapClosure {
HeapWord* _threshold;
CMSConcMarkingTask* _task;
public:
Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* overflow_stack);
ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* overflow_stack);
bool do_bit(size_t offset);
inline void do_yield_check();
@ -1400,8 +1400,8 @@ class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
bool _parallel;
CMSBitMap* _bit_map;
union {
MarkRefsIntoAndScanClosure* _scan_closure;
Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
MarkRefsIntoAndScanClosure* _scan_closure;
ParMarkRefsIntoAndScanClosure* _par_scan_closure;
};
public:
@ -1425,7 +1425,7 @@ class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
ReferenceProcessor* rp,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
Par_MarkRefsIntoAndScanClosure* cl):
ParMarkRefsIntoAndScanClosure* cl):
#ifdef ASSERT
_collector(collector),
_span(span),
@ -1470,7 +1470,7 @@ class MarkFromDirtyCardsClosure: public MemRegionClosure {
CompactibleFreeListSpace* space,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
Par_MarkRefsIntoAndScanClosure* cl):
ParMarkRefsIntoAndScanClosure* cl):
_space(space),
_num_dirty_cards(0),
_scan_cl(collector, span, collector->ref_processor(), bit_map,
View File
@ -381,7 +381,7 @@ inline void MarkFromRootsClosure::do_yield_check() {
}
}
inline void Par_MarkFromRootsClosure::do_yield_check() {
inline void ParMarkFromRootsClosure::do_yield_check() {
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive()) {
do_yield_work();
@ -392,7 +392,7 @@ inline void PushOrMarkClosure::do_yield_check() {
_parent->do_yield_check();
}
inline void Par_PushOrMarkClosure::do_yield_check() {
inline void ParPushOrMarkClosure::do_yield_check() {
_parent->do_yield_check();
}
View File
@ -455,7 +455,7 @@ void ParScanThreadStateSet::flush() {
// Every thread has its own age table. We need to merge
// them all into one.
ageTable *local_table = par_scan_state.age_table();
AgeTable *local_table = par_scan_state.age_table();
_young_gen.age_table()->merge(local_table);
// Inform old gen that we're done.
@ -469,7 +469,7 @@ void ParScanThreadStateSet::flush() {
// to avoid this by reorganizing the code a bit, I am loathe
// to do that unless we find cases where ergo leads to bad
// performance.
CFLS_LAB::compute_desired_plab_size();
CompactibleFreeListSpaceLAB::compute_desired_plab_size();
}
}
View File
@ -94,7 +94,7 @@ class ParScanThreadState {
int _hash_seed;
int _thread_num;
ageTable _ageTable;
AgeTable _ageTable;
bool _to_space_full;
@ -132,7 +132,7 @@ class ParScanThreadState {
ParallelTaskTerminator& term_);
public:
ageTable* age_table() {return &_ageTable;}
AgeTable* age_table() {return &_ageTable;}
ObjToScanQueue* work_queue() { return _work_queue; }
View File
@ -203,7 +203,7 @@ void VM_GenCollectFullConcurrent::doit() {
gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
} // Else no need for a foreground young gc
assert((_gc_count_before < gch->total_collections()) ||
(GC_locker::is_active() /* gc may have been skipped */
(GCLocker::is_active() /* gc may have been skipped */
&& (_gc_count_before == gch->total_collections())),
"total_collections() should be monotonically increasing");
View File
@ -32,6 +32,72 @@
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
// Represents a set of free small integer ids.
class FreeIdSet : public CHeapObj<mtGC> {
enum {
end_of_list = UINT_MAX,
claimed = UINT_MAX - 1
};
uint _size;
Monitor* _mon;
uint* _ids;
uint _hd;
uint _waiters;
uint _claimed;
public:
FreeIdSet(uint size, Monitor* mon);
~FreeIdSet();
// Returns an unclaimed parallel id (waiting for one to be released if
// necessary).
uint claim_par_id();
void release_par_id(uint id);
};
FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
_size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
{
guarantee(size != 0, "must be");
_ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
for (uint i = 0; i < size - 1; i++) {
_ids[i] = i+1;
}
_ids[size-1] = end_of_list; // end of list.
}
FreeIdSet::~FreeIdSet() {
FREE_C_HEAP_ARRAY(uint, _ids);
}
uint FreeIdSet::claim_par_id() {
MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
while (_hd == end_of_list) {
_waiters++;
_mon->wait(Mutex::_no_safepoint_check_flag);
_waiters--;
}
uint res = _hd;
_hd = _ids[res];
_ids[res] = claimed; // For debugging.
_claimed++;
return res;
}
void FreeIdSet::release_par_id(uint id) {
MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
assert(_ids[id] == claimed, "Precondition.");
_ids[id] = _hd;
_hd = id;
_claimed--;
if (_waiters > 0) {
_mon->notify_all();
}
}
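FreeIdSet is new code in this change: a small blocking pool of integer ids guarded by a Monitor. The sketch below shows the intended claim/release protocol (with_par_id() and do_work() are illustrative names, not part of the patch); at most 'size' callers can hold an id at once, and a released id wakes any waiting claimer.

// Illustrative sketch only; do_work() stands in for real per-worker work.
void with_par_id(FreeIdSet* ids) {
  uint worker_i = ids->claim_par_id();   // blocks on the monitor until an id is free
  do_work(worker_i);                     // the id can index per-worker data structures
  ids->release_par_id(worker_i);         // returns the id and notifies waiters
}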
DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
@ -103,7 +169,8 @@ void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
int process_completed_threshold,
int max_completed_queue,
Mutex* lock,
DirtyCardQueueSet* fl_owner) {
DirtyCardQueueSet* fl_owner,
bool init_free_ids) {
_mut_process_closure = cl;
PtrQueueSet::initialize(cbl_mon,
fl_lock,
@ -112,7 +179,9 @@ void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
fl_owner);
set_buffer_size(G1UpdateBufferSize);
_shared_dirty_card_queue.set_lock(lock);
_free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
if (init_free_ids) {
_free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
}
}
void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
@ -120,48 +189,20 @@ void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
}
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
guarantee(_free_ids != NULL, "must be");
// Used to determine if we had already claimed a par_id
// before entering this method.
bool already_claimed = false;
// claim a par id
uint worker_i = _free_ids->claim_par_id();
// We grab the current JavaThread.
JavaThread* thread = JavaThread::current();
// We get the the number of any par_id that this thread
// might have already claimed.
uint worker_i = thread->get_claimed_par_id();
// If worker_i is not UINT_MAX then the thread has already claimed
// a par_id. We make note of it using the already_claimed value
if (worker_i != UINT_MAX) {
already_claimed = true;
} else {
// Otherwise we need to claim a par id
worker_i = _free_ids->claim_par_id();
// And store the par_id value in the thread
thread->set_claimed_par_id(worker_i);
bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
_sz, true, worker_i);
if (b) {
Atomic::inc(&_processed_buffers_mut);
}
bool b = false;
if (worker_i != UINT_MAX) {
b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
_sz, true, worker_i);
if (b) Atomic::inc(&_processed_buffers_mut);
// release the id
_free_ids->release_par_id(worker_i);
// If we had not claimed an id before entering the method
// then we must release the id.
if (!already_claimed) {
// we release the id
_free_ids->release_par_id(worker_i);
// and set the claimed_id in the thread to UINT_MAX
thread->set_claimed_par_id(UINT_MAX);
}
}
return b;
}
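Because removed and added lines are interleaved above, the final shape of mut_process_buffer() is easier to see written out. Reconstructed from the added lines, the new body reduces to claim, process, release, with no per-thread caching of the id:

bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
  guarantee(_free_ids != NULL, "must be");

  // claim a par id
  uint worker_i = _free_ids->claim_par_id();

  bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
                                                   _sz, true, worker_i);
  if (b) {
    Atomic::inc(&_processed_buffers_mut);
  }

  // release the id
  _free_ids->release_par_id(worker_i);

  return b;
}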
View File
@ -116,7 +116,8 @@ public:
int process_completed_threshold,
int max_completed_queue,
Mutex* lock,
DirtyCardQueueSet* fl_owner = NULL);
DirtyCardQueueSet* fl_owner,
bool init_free_ids = false);
// The number of parallel ids that can be claimed to allow collector or
// mutator threads to do card-processing work.
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -277,7 +277,7 @@ HeapRegion* OldGCAllocRegion::release() {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top();
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, BOTConstants::N_bytes);
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,38 +35,32 @@
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
// G1BlockOffsetTable
//////////////////////////////////////////////////////////////////////
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(), _end(NULL), _listener(), _offset_array(NULL) {
_reserved = heap;
_end = NULL;
G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(heap), _offset_array(NULL) {
MemRegion bot_reserved = storage->reserved();
_offset_array = (u_char*)bot_reserved.start();
_end = _reserved.end();
storage->set_mapping_changed_listener(&_listener);
log_trace(gc, bot)("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
log_trace(gc, bot)("G1BlockOffsetTable::G1BlockOffsetTable: ");
log_trace(gc, bot)(" rs.base(): " PTR_FORMAT " rs.size(): " SIZE_FORMAT " rs end(): " PTR_FORMAT,
p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
}
bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
bool G1BlockOffsetTable::is_card_boundary(HeapWord* p) const {
assert(p >= _reserved.start(), "just checking");
size_t delta = pointer_delta(p, _reserved.start());
return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
return (delta & right_n_bits((int)BOTConstants::LogN_words)) == (size_t)NoBits;
}
#ifdef ASSERT
void G1BlockOffsetSharedArray::check_index(size_t index, const char* msg) const {
assert((index) < (_reserved.word_size() >> LogN_words),
void G1BlockOffsetTable::check_index(size_t index, const char* msg) const {
assert((index) < (_reserved.word_size() >> BOTConstants::LogN_words),
"%s - index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
msg, (index), (_reserved.word_size() >> LogN_words));
msg, (index), (_reserved.word_size() >> BOTConstants::LogN_words));
assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),
"Index " SIZE_FORMAT " corresponding to " PTR_FORMAT
" (%u) is not in committed area.",
@ -77,25 +71,19 @@ void G1BlockOffsetSharedArray::check_index(size_t index, const char* msg) const
#endif // ASSERT
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
// G1BlockOffsetTablePart
//////////////////////////////////////////////////////////////////////
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
MemRegion mr) :
G1BlockOffsetTable(mr.start(), mr.end()),
_unallocated_block(_bottom),
_array(array), _gsp(NULL) {
assert(_bottom <= _end, "arguments out of order");
}
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
_gsp = sp;
}
G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp) :
_bot(array),
_space(gsp),
_next_offset_threshold(NULL),
_next_offset_index(0)
{ }
// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
void G1BlockOffsetTablePart:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
if (start >= end) {
// The start address is equal to the end address (or to
@ -137,38 +125,37 @@ G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord*
// Move back N (e.g., 8) entries and repeat with the
// value of the new entry
//
size_t start_card = _array->index_for(start);
size_t end_card = _array->index_for(end-1);
assert(start ==_array->address_for_index(start_card), "Precondition");
assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
size_t start_card = _bot->index_for(start);
size_t end_card = _bot->index_for(end-1);
assert(start ==_bot->address_for_index(start_card), "Precondition");
assert(end ==_bot->address_for_index(end_card)+BOTConstants::N_words, "Precondition");
set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}
// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
void G1BlockOffsetTablePart::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
if (start_card > end_card) {
return;
}
assert(start_card > _array->index_for(_bottom), "Cannot be first card");
assert(_array->offset_array(start_card-1) <= N_words,
assert(start_card > _bot->index_for(_space->bottom()), "Cannot be first card");
assert(_bot->offset_array(start_card-1) <= BOTConstants::N_words,
"Offset card has an unexpected value");
size_t start_card_for_region = start_card;
u_char offset = max_jubyte;
for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
for (uint i = 0; i < BOTConstants::N_powers; i++) {
// -1 so that the card with the actual offset is counted. Another -1
// so that the reach ends in this region and not at the start
// of the next.
size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
offset = N_words + i;
size_t reach = start_card - 1 + (BOTConstants::power_to_cards_back(i+1) - 1);
offset = BOTConstants::N_words + i;
if (reach >= end_card) {
_array->set_offset_array(start_card_for_region, end_card, offset);
_bot->set_offset_array(start_card_for_region, end_card, offset);
start_card_for_region = reach + 1;
break;
}
_array->set_offset_array(start_card_for_region, reach, offset);
_bot->set_offset_array(start_card_for_region, reach, offset);
start_card_for_region = reach + 1;
}
assert(start_card_for_region > end_card, "Sanity check");
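The loop above writes the BOT's logarithmic back-skip encoding: an entry below BOTConstants::N_words is a direct word offset back to the block start, while an entry of N_words + i tells a reader to hop back an exponentially growing number of cards (power_to_cards_back) and look again. The decode side is what block_at_or_preceding() in g1BlockOffsetTable.inline.hpp implements; a minimal sketch follows (block_start_for() is an illustrative wrapper and access control is ignored).

// Sketch of the decode walk that consumes the entries written above.
HeapWord* block_start_for(const G1BlockOffsetTable* bot, size_t index) {
  HeapWord* q = bot->address_for_index(index);       // card boundary for this entry
  uint offset = bot->offset_array(index);
  while (offset >= BOTConstants::N_words) {
    // Encoded entry: jump back a power-of-Base number of cards and retry.
    size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
    q     -= BOTConstants::N_words * n_cards_back;
    index -= n_cards_back;
    offset = bot->offset_array(index);
  }
  return q - offset;                                  // direct entry: word offset to the block start
}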
@ -178,79 +165,44 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
void G1BlockOffsetTablePart::check_all_cards(size_t start_card, size_t end_card) const {
if (end_card < start_card) {
return;
}
guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
guarantee(_bot->offset_array(start_card) == BOTConstants::N_words, "Wrong value in second card");
for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
u_char entry = _array->offset_array(c);
if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
guarantee(entry > N_words,
u_char entry = _bot->offset_array(c);
if (c - start_card > BOTConstants::power_to_cards_back(1)) {
guarantee(entry > BOTConstants::N_words,
"Should be in logarithmic region - "
"entry: %u, "
"_array->offset_array(c): %u, "
"N_words: %u",
(uint)entry, (uint)_array->offset_array(c), (uint)N_words);
(uint)entry, (uint)_bot->offset_array(c), BOTConstants::N_words);
}
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t backskip = BOTConstants::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry,
guarantee(_bot->offset_array(landing_card) <= entry,
"Monotonicity - landing_card offset: %u, "
"entry: %u",
(uint)_array->offset_array(landing_card), (uint)entry);
(uint)_bot->offset_array(landing_card), (uint)entry);
} else {
guarantee(landing_card == start_card - 1, "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= N_words,
guarantee(_bot->offset_array(landing_card) <= BOTConstants::N_words,
"landing card offset: %u, "
"N_words: %u",
(uint)_array->offset_array(landing_card), (uint)N_words);
(uint)_bot->offset_array(landing_card), (uint)BOTConstants::N_words);
}
}
}
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
// Must read this exactly once because it can be modified by parallel
// allocation.
HeapWord* ub = _unallocated_block;
if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
assert(ub < _end, "tautology (see above)");
return ub;
}
// Otherwise, find the block start using the table.
HeapWord* q = block_at_or_preceding(addr, false, 0);
return forward_to_block_containing_addr(q, addr);
}
// This duplicates a little code from the above: unavoidable.
HeapWord*
G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
// Must read this exactly once because it can be modified by parallel
// allocation.
HeapWord* ub = _unallocated_block;
if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
assert(ub < _end, "tautology (see above)");
return ub;
}
// Otherwise, find the block start using the table.
HeapWord* q = block_at_or_preceding(addr, false, 0);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
}
HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
HeapWord* n,
const void* addr) {
HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_slow(HeapWord* q,
HeapWord* n,
const void* addr) {
// We're not in the normal case. We need to handle an important subcase
// here: LAB allocation. An allocation previously recorded in the
// offset table was actually a lab allocation, and was divided into
@ -260,17 +212,17 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
// If the first object's end q is at the card boundary. Start refining
// with the corresponding card (the value of the entry will be basically
// set to 0). If the object crosses the boundary -- start from the next card.
size_t n_index = _array->index_for(n);
size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
size_t n_index = _bot->index_for(n);
size_t next_index = _bot->index_for(n) + !_bot->is_card_boundary(n);
// Calculate a consistent next boundary. If "n" is not at the boundary
// already, step to the boundary.
HeapWord* next_boundary = _array->address_for_index(n_index) +
(n_index == next_index ? 0 : N_words);
assert(next_boundary <= _array->_end,
HeapWord* next_boundary = _bot->address_for_index(n_index) +
(n_index == next_index ? 0 : BOTConstants::N_words);
assert(next_boundary <= _bot->_reserved.end(),
"next_boundary is beyond the end of the covered region "
" next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
p2i(next_boundary), p2i(_array->_end));
if (addr >= gsp()->top()) return gsp()->top();
p2i(next_boundary), p2i(_bot->_reserved.end()));
if (addr >= _space->top()) return _space->top();
while (next_boundary < addr) {
while (n <= next_boundary) {
q = n;
@ -280,18 +232,11 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
// [q, n) is the block that crosses the boundary.
alloc_block_work2(&next_boundary, &next_index, q, n);
alloc_block_work(&next_boundary, &next_index, q, n);
}
return forward_to_block_containing_addr_const(q, n, addr);
}
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
HeapWord* new_end = _bottom + new_word_size;
_end = new_end; // update _end
}
//
// threshold_
// | _index_
@ -302,8 +247,8 @@ void G1BlockOffsetArray::resize(size_t new_word_size) {
// ( ^ ]
// block-start
//
void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end) {
void G1BlockOffsetTablePart::alloc_block_work(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end) {
// For efficiency, do copy-in/copy-out.
HeapWord* threshold = *threshold_;
size_t index = *index_;
@ -312,13 +257,13 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
"phantom block");
assert(blk_end > threshold, "should be past threshold");
assert(blk_start <= threshold, "blk_start should be at or before threshold");
assert(pointer_delta(threshold, blk_start) <= N_words,
assert(pointer_delta(threshold, blk_start) <= BOTConstants::N_words,
"offset should be <= BlockOffsetSharedArray::N");
assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
"reference must be into the heap");
assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
"limit must be within the heap");
assert(threshold == _array->_reserved.start() + index*N_words,
assert(threshold == _bot->_reserved.start() + index*BOTConstants::N_words,
"index must agree with threshold");
DEBUG_ONLY(size_t orig_index = index;)
@ -326,26 +271,26 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
// Mark the card that holds the offset into the block. Note
// that _next_offset_index and _next_offset_threshold are not
// updated until the end of this method.
_array->set_offset_array(index, threshold, blk_start);
_bot->set_offset_array(index, threshold, blk_start);
// We need to now mark the subsequent cards that this blk spans.
// Index of card on which blk ends.
size_t end_index = _array->index_for(blk_end - 1);
size_t end_index = _bot->index_for(blk_end - 1);
// Are there more cards left to be updated?
if (index + 1 <= end_index) {
HeapWord* rem_st = _array->address_for_index(index + 1);
HeapWord* rem_st = _bot->address_for_index(index + 1);
// Calculate rem_end this way because end_index
// may be the last valid index in the covered region.
HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
HeapWord* rem_end = _bot->address_for_index(end_index) + BOTConstants::N_words;
set_remainder_to_point_to_start(rem_st, rem_end);
}
index = end_index + 1;
// Calculate threshold_ this way because end_index
// may be the last valid index in the covered region.
threshold = _array->address_for_index(end_index) + N_words;
threshold = _bot->address_for_index(end_index) + BOTConstants::N_words;
assert(threshold >= blk_end, "Incorrect offset threshold");
// index_ and threshold_ updated here.
@ -355,55 +300,55 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
#ifdef ASSERT
// The offset can be 0 if the block starts on a boundary. That
// is checked by an assertion above.
size_t start_index = _array->index_for(blk_start);
HeapWord* boundary = _array->address_for_index(start_index);
assert((_array->offset_array(orig_index) == 0 && blk_start == boundary) ||
(_array->offset_array(orig_index) > 0 && _array->offset_array(orig_index) <= N_words),
size_t start_index = _bot->index_for(blk_start);
HeapWord* boundary = _bot->address_for_index(start_index);
assert((_bot->offset_array(orig_index) == 0 && blk_start == boundary) ||
(_bot->offset_array(orig_index) > 0 && _bot->offset_array(orig_index) <= BOTConstants::N_words),
"offset array should have been set - "
"orig_index offset: %u, "
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
(uint)_array->offset_array(orig_index),
(uint)_bot->offset_array(orig_index),
p2i(blk_start), p2i(boundary));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
(u_char) (N_words+BlockOffsetArray::N_powers-1),
assert(_bot->offset_array(j) > 0 &&
_bot->offset_array(j) <=
(u_char) (BOTConstants::N_words+BOTConstants::N_powers-1),
"offset array should have been set - "
"%u not > 0 OR %u not <= %u",
(uint) _array->offset_array(j),
(uint) _array->offset_array(j),
(uint) (N_words+BlockOffsetArray::N_powers-1));
(uint) _bot->offset_array(j),
(uint) _bot->offset_array(j),
(uint) (BOTConstants::N_words+BOTConstants::N_powers-1));
}
#endif
}
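For context, the caller-side fast path (visible further down in the g1BlockOffsetTable.hpp hunk) only reaches alloc_block_work() when a newly allocated block crosses the recorded threshold; allocations below the threshold need no BOT update:

// The guard around alloc_block_work(), as declared in the header.
void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  if (blk_end > _next_offset_threshold) {
    alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
  }
}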
void G1BlockOffsetArray::verify() const {
assert(gsp()->bottom() < gsp()->top(), "Only non-empty regions should be verified.");
size_t start_card = _array->index_for(gsp()->bottom());
size_t end_card = _array->index_for(gsp()->top() - 1);
void G1BlockOffsetTablePart::verify() const {
assert(_space->bottom() < _space->top(), "Only non-empty regions should be verified.");
size_t start_card = _bot->index_for(_space->bottom());
size_t end_card = _bot->index_for(_space->top() - 1);
for (size_t current_card = start_card; current_card < end_card; current_card++) {
u_char entry = _array->offset_array(current_card);
if (entry < N_words) {
u_char entry = _bot->offset_array(current_card);
if (entry < BOTConstants::N_words) {
// The entry should point to an object before the current card. Verify that
// it is possible to walk from that object in to the current card by just
// iterating over the objects following it.
HeapWord* card_address = _array->address_for_index(current_card);
HeapWord* card_address = _bot->address_for_index(current_card);
HeapWord* obj_end = card_address - entry;
while (obj_end < card_address) {
HeapWord* obj = obj_end;
size_t obj_size = block_size(obj);
obj_end = obj + obj_size;
guarantee(obj_end > obj && obj_end <= gsp()->top(),
guarantee(obj_end > obj && obj_end <= _space->top(),
"Invalid object end. obj: " PTR_FORMAT " obj_size: " SIZE_FORMAT " obj_end: " PTR_FORMAT " top: " PTR_FORMAT,
p2i(obj), obj_size, p2i(obj_end), p2i(gsp()->top()));
p2i(obj), obj_size, p2i(obj_end), p2i(_space->top()));
}
} else {
// Because we refine the BOT based on which cards are dirty there is not much we can verify here.
// We need to make sure that we are going backwards and that we don't pass the start of the
// corresponding heap region. But that is about all we can verify.
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t backskip = BOTConstants::entry_to_cards_back(entry);
guarantee(backskip >= 1, "Must be going back at least one card.");
size_t max_backskip = current_card - start_card;
@ -411,103 +356,66 @@ void G1BlockOffsetArray::verify() const {
"Going backwards beyond the start_card. start_card: " SIZE_FORMAT " current_card: " SIZE_FORMAT " backskip: " SIZE_FORMAT,
start_card, current_card, backskip);
HeapWord* backskip_address = _array->address_for_index(current_card - backskip);
guarantee(backskip_address >= gsp()->bottom(),
HeapWord* backskip_address = _bot->address_for_index(current_card - backskip);
guarantee(backskip_address >= _space->bottom(),
"Going backwards beyond bottom of the region: bottom: " PTR_FORMAT ", backskip_address: " PTR_FORMAT,
p2i(gsp()->bottom()), p2i(backskip_address));
p2i(_space->bottom()), p2i(backskip_address));
}
}
}
#ifndef PRODUCT
void
G1BlockOffsetArray::print_on(outputStream* out) {
size_t from_index = _array->index_for(_bottom);
size_t to_index = _array->index_for(_end);
G1BlockOffsetTablePart::print_on(outputStream* out) {
size_t from_index = _bot->index_for(_space->bottom());
size_t to_index = _bot->index_for(_space->end());
out->print_cr(">> BOT for area [" PTR_FORMAT "," PTR_FORMAT ") "
"cards [" SIZE_FORMAT "," SIZE_FORMAT ")",
p2i(_bottom), p2i(_end), from_index, to_index);
p2i(_space->bottom()), p2i(_space->end()), from_index, to_index);
for (size_t i = from_index; i < to_index; ++i) {
out->print_cr(" entry " SIZE_FORMAT_W(8) " | " PTR_FORMAT " : %3u",
i, p2i(_array->address_for_index(i)),
(uint) _array->offset_array(i));
i, p2i(_bot->address_for_index(i)),
(uint) _bot->offset_array(i));
}
}
#endif // !PRODUCT
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////
HeapWord*
G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
return forward_to_block_containing_addr(q, addr);
}
HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
}
G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
MemRegion mr) :
G1BlockOffsetArray(array, mr)
{
_next_offset_threshold = NULL;
_next_offset_index = 0;
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for_raw(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index_raw(_next_offset_index);
return _next_offset_threshold;
}
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
size_t bottom_index = _array->index_for_raw(_bottom);
assert(_array->address_for_index_raw(bottom_index) == _bottom,
"Precondition of call");
_array->set_offset_array_raw(bottom_index, 0);
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index(_next_offset_index);
return _next_offset_threshold;
}
void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top, size_t fill_size) {
// The first BOT entry should have offset 0.
reset_bot();
alloc_block(_bottom, obj_top);
if (fill_size > 0) {
alloc_block(obj_top, fill_size);
}
}
#ifndef PRODUCT
void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
G1BlockOffsetArray::print_on(out);
out->print_cr(" next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
out->print_cr(" next offset index: " SIZE_FORMAT, _next_offset_index);
}
#endif // !PRODUCT
HeapWord* G1BlockOffsetTablePart::initialize_threshold_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
_next_offset_index = _bot->index_for_raw(_space->bottom());
_next_offset_index++;
_next_offset_threshold =
_bot->address_for_index_raw(_next_offset_index);
return _next_offset_threshold;
}
void G1BlockOffsetTablePart::zero_bottom_entry_raw() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
size_t bottom_index = _bot->index_for_raw(_space->bottom());
assert(_bot->address_for_index_raw(bottom_index) == _space->bottom(),
"Precondition of call");
_bot->set_offset_array_raw(bottom_index, 0);
}
HeapWord* G1BlockOffsetTablePart::initialize_threshold() {
assert(!G1CollectedHeap::heap()->is_in_reserved(_bot->_offset_array),
"just checking");
_next_offset_index = _bot->index_for(_space->bottom());
_next_offset_index++;
_next_offset_threshold =
_bot->address_for_index(_next_offset_index);
return _next_offset_threshold;
}
void G1BlockOffsetTablePart::set_for_starts_humongous(HeapWord* obj_top, size_t fill_size) {
// The first BOT entry should have offset 0.
reset_bot();
alloc_block(_space->bottom(), obj_top);
if (fill_size > 0) {
alloc_block(obj_top, fill_size);
}
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,131 +26,38 @@
#define SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
// G1BlockOffsetTable (abstract)
// -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
// -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below. Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.
// Forward declarations
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;
class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
protected:
// These members describe the region covered by the table.
// The space this table is covering.
HeapWord* _bottom; // == reserved.start
HeapWord* _end; // End of currently allocated region.
public:
// Initialize the table to cover the given space.
// The contents of the initial table are undefined.
G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
_bottom(bottom), _end(end)
{
assert(_bottom <= _end, "arguments out of order");
}
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
virtual void resize(size_t new_word_size) = 0;
virtual void set_bottom(HeapWord* new_bottom) {
assert(new_bottom <= _end,
"new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
p2i(new_bottom), p2i(_end));
_bottom = new_bottom;
resize(pointer_delta(_end, _bottom));
}
// Requires "addr" to be contained by a block, and returns the address of
// the start of that block. (May have side effects, namely updating of
// shared array entries that "point" too far backwards. This can occur,
// for example, when LAB allocation is used in a space covered by the
// table.)
virtual HeapWord* block_start_unsafe(const void* addr) = 0;
// Same as above, but does not have any of the possible side effects
// discussed above.
virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block. (May have side effects,
// namely updating of shared array entries that "point" too far
// backwards. This can occur, for example, when lab allocation is used
// in a space covered by the table.)
inline HeapWord* block_start(const void* addr);
// Same as above, but does not have any of the possible side effects
// discussed above.
inline HeapWord* block_start_const(const void* addr) const;
};
class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
// retrieve it here since this would cause firing of several asserts. The code
// executed after commit of a region already needs to do some re-initialization of
// the HeapRegion, so we combine that.
}
};
class G1BlockOffsetTable;
class G1ContiguousSpace;
// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place,
// such as, for example, in G1 or in the train generation.)
// Each G1BlockOffsetTablePart is owned by a G1ContiguousSpace.
// Here is the shared array type.
class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
friend class G1BlockOffsetArray;
friend class G1BlockOffsetArrayContigSpace;
class G1BlockOffsetTable: public CHeapObj<mtGC> {
friend class G1BlockOffsetTablePart;
friend class VMStructs;
private:
G1BlockOffsetSharedArrayMappingChangedListener _listener;
// The reserved region covered by the shared array.
// The reserved region covered by the table.
MemRegion _reserved;
// End of the current committed region.
HeapWord* _end;
// Array for keeping offsets for retrieving object start fast given an
// address.
u_char* _offset_array; // byte array keeping backwards offsets
void check_offset(size_t offset, const char* msg) const {
assert(offset <= N_words,
assert(offset <= BOTConstants::N_words,
"%s - offset: " SIZE_FORMAT ", N_words: %u",
msg, offset, (uint)N_words);
msg, offset, BOTConstants::N_words);
}
// Bounds checking accessors:
@ -176,29 +83,18 @@ public:
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / N_words);
size_t number_of_slots = (mem_region_words / BOTConstants::N_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
// Returns how many bytes of the heap a single byte of the BOT corresponds to.
static size_t heap_map_factor() {
return N_bytes;
return BOTConstants::N_bytes;
}
enum SomePublicConstants {
LogN = 9,
LogN_words = LogN - LogHeapWordSize,
N_bytes = 1 << LogN,
N_words = 1 << LogN_words
};
// Initialize the table to cover from "base" to (at least)
// "base + init_word_size". In the future, the table may be expanded
// (see "resize" below) up to the size of "_reserved" (which must be at
// least "init_word_size".) The contents of the initial table are
// undefined; it is the responsibility of the constituent
// G1BlockOffsetTable(s) to initialize cards.
G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
// Initialize the Block Offset Table to cover the memory region passed
// in the heap parameter.
G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);
// Return the appropriate index into "_offset_array" for "p".
inline size_t index_for(const void* p) const;
@ -209,33 +105,23 @@ public:
inline HeapWord* address_for_index(size_t index) const;
// Variant of address_for_index that does not check the index for validity.
inline HeapWord* address_for_index_raw(size_t index) const {
return _reserved.start() + (index << LogN_words);
return _reserved.start() + (index << BOTConstants::LogN_words);
}
};
// And here is the G1BlockOffsetTable subtype that uses the array.
class G1BlockOffsetArray: public G1BlockOffsetTable {
friend class G1BlockOffsetSharedArray;
friend class G1BlockOffsetArrayContigSpace;
class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
friend class G1BlockOffsetTable;
friend class VMStructs;
private:
enum SomePrivateConstants {
N_words = G1BlockOffsetSharedArray::N_words,
LogN = G1BlockOffsetSharedArray::LogN
};
// allocation boundary at which offset array must be updated
HeapWord* _next_offset_threshold;
size_t _next_offset_index; // index corresponding to that boundary
// This is the array, which can be shared by several BlockOffsetArray's
// servicing different
G1BlockOffsetSharedArray* _array;
// This is the global BlockOffsetTable.
G1BlockOffsetTable* _bot;
// The space that owns this subregion.
G1OffsetTableContigSpace* _gsp;
// The portion [_unallocated_block, _sp.end()) of the space that
// is a single block known not to contain any objects.
// NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
HeapWord* _unallocated_block;
G1ContiguousSpace* _space;
// Sets the entries
// corresponding to the cards starting at "start" and ending at "end"
@ -246,9 +132,12 @@ private:
// that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end);
protected:
G1OffsetTableContigSpace* gsp() const { return _gsp; }
// Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
inline size_t block_size(const HeapWord* p) const;
@ -263,9 +152,8 @@ protected:
// next block (or the end of the space.) Return the address of the
// beginning of the block that contains "addr". Does so without side
// effects (see, e.g., spec of block_start.)
inline HeapWord*
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const;
inline HeapWord* forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const;
// "q" is a block boundary that is <= "addr"; return the address of the
// beginning of the block that contains "addr". May have side effects
@ -288,60 +176,26 @@ protected:
// starting at "*threshold_", and for any other indices crossed by the
// block. Updates "*threshold_" and "*index_" to correspond to the first
// index after the block end.
void alloc_block_work2(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end);
public:
// The space may not have it's bottom and top set yet, which is why the
// region is passed as a parameter. The elements of the array are
// initialized to zero.
G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
// Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for
// the containing concrete subtype of Space.
// This would be legal C++, but MS VC++ doesn't allow it.
void set_space(G1OffsetTableContigSpace* sp);
// Resets the covered region to one with the same _bottom as before but
// the "new_word_size".
void resize(size_t new_word_size);
virtual HeapWord* block_start_unsafe(const void* addr);
virtual HeapWord* block_start_unsafe_const(const void* addr) const;
void alloc_block_work(HeapWord** threshold_, size_t* index_,
HeapWord* blk_start, HeapWord* blk_end);
void check_all_cards(size_t left_card, size_t right_card) const;
public:
// The elements of the array are initialized to zero.
G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp);
void verify() const;
virtual void print_on(outputStream* out) PRODUCT_RETURN;
};
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
friend class VMStructs;
// allocation boundary at which offset array must be updated
HeapWord* _next_offset_threshold;
size_t _next_offset_index; // index corresponding to that boundary
// Work function to be called when allocation start crosses the next
// threshold in the contig space.
void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
blk_start, blk_end);
}
// Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block. (May have side effects,
// namely updating of shared array entries that "point" too far
// backwards. This can occur, for example, when lab allocation is used
// in a space covered by the table.)
inline HeapWord* block_start(const void* addr);
// Same as above, but does not have any of the possible side effects
// discussed above.
inline HeapWord* block_start_const(const void* addr) const;
// Initialize the threshold to reflect the first boundary after the
// bottom of the covered region.
@ -362,19 +216,16 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
// never exceeds the "_next_offset_threshold".
void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
if (blk_end > _next_offset_threshold) {
alloc_block_work1(blk_start, blk_end);
alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
}
}
void alloc_block(HeapWord* blk, size_t size) {
alloc_block(blk, blk+size);
}
HeapWord* block_start_unsafe(const void* addr);
HeapWord* block_start_unsafe_const(const void* addr) const;
void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);
virtual void print_on(outputStream* out) PRODUCT_RETURN;
void print_on(outputStream* out) PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_G1_G1BLOCKOFFSETTABLE_HPP
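Taken together, the header now replaces the old G1BlockOffsetSharedArray/G1BlockOffsetArray pair with one heap-wide G1BlockOffsetTable that owns the byte array plus one lightweight, value-type G1BlockOffsetTablePart per G1ContiguousSpace. Abbreviated from the declarations above:

// Rough shape of the new ownership (member lists abbreviated).
class G1BlockOffsetTable : public CHeapObj<mtGC> {    // one instance for the whole heap
  MemRegion _reserved;                                 // the reserved heap region
  u_char*   _offset_array;                             // one byte per BOT card
};

class G1BlockOffsetTablePart {                         // one per G1ContiguousSpace
  G1BlockOffsetTable* _bot;                            // the shared table above
  G1ContiguousSpace*  _space;                          // the space this part covers
  HeapWord*           _next_offset_threshold;          // next card boundary needing an update
  size_t              _next_offset_index;              // its index in the offset array
};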
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,34 +30,36 @@
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "gc/shared/space.hpp"
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
if (addr >= _bottom && addr < _end) {
return block_start_unsafe(addr);
inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr) {
if (addr >= _space->bottom() && addr < _space->end()) {
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
return forward_to_block_containing_addr(q, addr);
} else {
return NULL;
}
}
inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
if (addr >= _bottom && addr < _end) {
return block_start_unsafe_const(addr);
inline HeapWord* G1BlockOffsetTablePart::block_start_const(const void* addr) const {
if (addr >= _space->bottom() && addr < _space->end()) {
HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr_const(q, n, addr);
} else {
return NULL;
}
}
u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
u_char G1BlockOffsetTable::offset_array(size_t index) const {
check_index(index, "index out of range");
return _offset_array[index];
}
void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
check_index(index, "index out of range");
set_offset_array_raw(index, offset);
}
void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
void G1BlockOffsetTable::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);
@ -65,7 +67,7 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, He
set_offset_array(index, (u_char)offset);
}
void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
void G1BlockOffsetTable::set_offset_array(size_t left, size_t right, u_char offset) {
check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
@ -73,11 +75,11 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
}
// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
inline size_t G1BlockOffsetTable::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> BOTConstants::LogN;
}
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
inline size_t G1BlockOffsetTable::index_for(const void* p) const {
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
@ -88,8 +90,7 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
return result;
}
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
inline HeapWord* G1BlockOffsetTable::address_for_index(size_t index) const {
check_index(index, "index out of range");
HeapWord* result = address_for_index_raw(index);
assert(result >= _reserved.start() && result < _reserved.end(),
@ -99,47 +100,45 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
return result;
}
inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
return gsp()->block_size(p);
inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p) const {
return _space->block_size(p);
}
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
bool has_max_index,
size_t max_index) const {
assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
size_t index = _array->index_for(addr);
inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr,
bool has_max_index,
size_t max_index) const {
assert(_bot->offset_array(0) == 0, "objects can't cross covered areas");
size_t index = _bot->index_for(addr);
// We must make sure that the offset table entry we use is valid. If
// "addr" is past the end, start at the last known one and go forward.
if (has_max_index) {
index = MIN2(index, max_index);
}
HeapWord* q = _array->address_for_index(index);
HeapWord* q = _bot->address_for_index(index);
uint offset = _array->offset_array(index); // Extend u_char to uint.
while (offset >= N_words) {
uint offset = _bot->offset_array(index); // Extend u_char to uint.
while (offset >= BOTConstants::N_words) {
// The excess of the offset from N_words indicates a power of Base
// to go back by.
size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
q -= (N_words * n_cards_back);
size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
q -= (BOTConstants::N_words * n_cards_back);
index -= n_cards_back;
offset = _array->offset_array(index);
offset = _bot->offset_array(index);
}
assert(offset < N_words, "offset too large");
assert(offset < BOTConstants::N_words, "offset too large");
q -= offset;
return q;
}
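The loop above walks backwards using the BOT entry encoding: an entry smaller than BOTConstants::N_words is a direct word offset to the block start, while a larger entry tells the reader to jump back a whole number of cards and decode again. The sketch below shows one plausible decode under assumed constants; the real values and back-skip formula are defined by BOTConstants, not here.

#include <cassert>
#include <cstddef>

// Sketch of decoding one offset-table entry, mirroring the loop above.
// N_words and Base are assumptions for illustration, not the real constants.
namespace bot_entry_sketch {
  const size_t N_words = 64;  // entries below this are plain word offsets
  const size_t Base    = 4;   // assumed back-skip base

  // An entry of N_words + k is read here as "go back Base^k cards and retry".
  size_t entry_to_cards_back(size_t entry) {
    assert(entry >= N_words && "small entries are direct offsets");
    size_t cards = 1;
    for (size_t k = entry - N_words; k > 0; k--) {
      cards *= Base;
    }
    return cards;
  }
}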
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const {
if (addr >= gsp()->top()) return gsp()->top();
inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
const void* addr) const {
if (addr >= _space->top()) return _space->top();
while (n <= addr) {
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
if (obj->klass_or_null() == NULL) {
return q;
}
n += block_size(q);
}
assert(q <= n, "wrong order for q and addr");
@ -147,10 +146,11 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
return q;
}
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
const void* addr) {
if (oop(q)->klass_or_null() == NULL) return q;
inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q,
const void* addr) {
if (oop(q)->klass_or_null() == NULL) {
return q;
}
HeapWord* n = q + block_size(q);
// In the normal case, where the query "addr" is a card boundary, and the
// offset table chunks are the same size as cards, the block starting at

View File

@ -601,7 +601,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
return result;
}
if (GC_locker::is_active_and_needs_gc()) {
if (GCLocker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
@ -617,7 +617,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
if (GC_locker::needs_gc()) {
if (GCLocker::needs_gc()) {
should_try_gc = false;
} else {
// Read the GC count while still holding the Heap_lock.
@ -653,7 +653,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GC_locker::stall_until_clear();
GCLocker::stall_until_clear();
(*gclocker_retry_count_ret) += 1;
}
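These hunks are a mechanical GC_locker to GCLocker rename, but the protocol they touch is worth spelling out: when a JNI critical section is holding off collection, the allocating thread does not start its own GC; it waits for the locker-initiated GC and then retries the allocation, giving up after a bounded number of stalls. A standalone sketch of that shape, with every name below a stand-in rather than the real G1 code:

#include <cstddef>

// Standalone sketch of the GCLocker stall/retry protocol. Only the shape of
// the decision (skip the GC, stall, then retry the allocation) mirrors the
// real code; the types, helpers and retry bound are stand-ins.
namespace gclocker_sketch {
  struct LockerSketch {
    bool active_and_needs_gc() const { return false; }  // stubbed state
    bool needs_gc() const            { return false; }
    void stall_until_clear() const   { /* would block until the GC ran */ }
  };

  void* allocate_with_retry(LockerSketch& locker, size_t size,
                            void* (*try_alloc)(size_t)) {
    for (int retries = 0; retries < 2; ++retries) {   // assumed retry bound
      if (void* p = try_alloc(size)) return p;
      if (locker.active_and_needs_gc() || locker.needs_gc()) {
        // A critical section is (or was) holding off GC: wait for the
        // locker-initiated collection instead of starting our own.
        locker.stall_until_clear();
        continue;                                     // then retry
      }
      // Otherwise the real code would schedule a collection pause here.
    }
    return NULL;
  }
}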
@ -1028,7 +1028,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
return result;
}
if (GC_locker::is_active_and_needs_gc()) {
if (GCLocker::is_active_and_needs_gc()) {
should_try_gc = false;
} else {
// The GCLocker may not be active but the GCLocker initiated
@ -1036,7 +1036,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
if (GC_locker::needs_gc()) {
if (GCLocker::needs_gc()) {
should_try_gc = false;
} else {
// Read the GC count while still holding the Heap_lock.
@ -1076,7 +1076,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GC_locker::stall_until_clear();
GCLocker::stall_until_clear();
(*gclocker_retry_count_ret) += 1;
}
@ -1211,7 +1211,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
bool clear_all_soft_refs) {
assert_at_safepoint(true /* should_be_vm_thread */);
if (GC_locker::check_active_before_gc()) {
if (GCLocker::check_active_before_gc()) {
return false;
}
@ -1745,7 +1745,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_is_alive_closure_stw(this),
_ref_processor_cm(NULL),
_ref_processor_stw(NULL),
_bot_shared(NULL),
_bot(NULL),
_cg1r(NULL),
_g1mm(NULL),
_refine_cte_cl(NULL),
@ -1906,8 +1906,8 @@ jint G1CollectedHeap::initialize() {
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
G1RegionToSpaceMapper* bot_storage =
create_aux_memory_mapper("Block offset table",
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetSharedArray::heap_map_factor());
G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetTable::heap_map_factor());
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
@ -1945,7 +1945,7 @@ jint G1CollectedHeap::initialize() {
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
_bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
_bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
{
HeapWord* start = _hrm.reserved().start();
@ -1984,7 +1984,9 @@ jint G1CollectedHeap::initialize() {
DirtyCardQ_FL_lock,
concurrent_g1_refine()->yellow_zone(),
concurrent_g1_refine()->red_zone(),
Shared_DirtyCardQ_lock);
Shared_DirtyCardQ_lock,
NULL, // fl_owner
true); // init_free_ids
dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
DirtyCardQ_CBL_mon,
@ -2394,8 +2396,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
if (retry_gc) {
if (GC_locker::is_active_and_needs_gc()) {
GC_locker::stall_until_clear();
if (GCLocker::is_active_and_needs_gc()) {
GCLocker::stall_until_clear();
}
}
}
@ -2549,6 +2551,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
// Previous workers starting region is valid
// so let's iterate from there
start_ind = (cs_size * (worker_i - 1)) / active_workers;
OrderAccess::loadload();
result = _worker_cset_start_region[worker_i - 1];
}
@ -3533,6 +3536,16 @@ void G1CollectedHeap::register_humongous_regions_with_cset() {
cl.flush_rem_set_entries();
}
class VerifyRegionRemSetClosure : public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* hr) {
if (!hr->is_archive() && !hr->is_continues_humongous()) {
hr->verify_rem_set();
}
return false;
}
};
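The new VerifyRegionRemSetClosure follows the usual HeapRegionClosure pattern: the heap calls doHeapRegion() once per region, and returning false keeps the iteration going over all regions (a true return would terminate it early). A standalone sketch of that pattern with stand-in types:

#include <cstddef>
#include <vector>

// Sketch of the closure-over-regions pattern used above. RegionSketch and
// the iterate loop are illustrative stand-ins, not the HotSpot classes.
namespace region_iter_sketch {
  struct RegionSketch {
    bool is_archive;
    bool is_continues_humongous;
    void verify_rem_set() const { /* verification elided */ }
  };

  struct RegionClosureSketch {
    bool do_region(const RegionSketch& r) {
      if (!r.is_archive && !r.is_continues_humongous) {
        r.verify_rem_set();
      }
      return false;   // false: keep visiting the remaining regions
    }
  };

  void iterate(std::vector<RegionSketch>& regions, RegionClosureSketch& cl) {
    for (size_t i = 0; i < regions.size(); i++) {
      if (cl.do_region(regions[i])) break;   // true would terminate early
    }
  }
}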
#ifdef ASSERT
class VerifyCSetClosure: public HeapRegionClosure {
public:
@ -3627,7 +3640,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */);
guarantee(!is_gc_active(), "collection is not reentrant");
if (GC_locker::check_active_before_gc()) {
if (GCLocker::check_active_before_gc()) {
return false;
}
@ -3722,6 +3735,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
increment_total_collections(false /* full gc */);
increment_gc_time_stamp();
if (VerifyRememberedSets) {
log_info(gc, verify)("[Verifying RemSets before GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
verify_before_gc();
check_bitmaps("GC Start");
@ -3926,6 +3945,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// scanning cards (see CR 7039627).
increment_gc_time_stamp();
if (VerifyRememberedSets) {
log_info(gc, verify)("[Verifying RemSets after GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
verify_after_gc();
check_bitmaps("GC End");

View File

@ -154,7 +154,7 @@ private:
uint _expansion_regions;
// The block offset table for the G1 heap.
G1BlockOffsetSharedArray* _bot_shared;
G1BlockOffsetTable* _bot;
// Tears down the region sets / lists so that they are empty and the
// regions on the heap do not belong to a region set / list. The
@ -1008,7 +1008,7 @@ public:
void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
// The shared block offset table array.
G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
G1BlockOffsetTable* bot() const { return _bot; }
// Reference Processing accessors

View File

@ -36,5 +36,5 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot_shared(), mr);
return new HeapRegion(hrs_index, bot(), mr);
}

View File

@ -841,7 +841,7 @@ private:
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
ageTable _survivors_age_table;
AgeTable _survivors_age_table;
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
@ -882,7 +882,7 @@ public:
return _recorded_survivor_regions;
}
void record_age_table(ageTable* age_table) {
void record_age_table(AgeTable* age_table) {
_survivors_age_table.merge(age_table);
}
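record_age_table() above is how per-worker survivor age statistics reach the policy: each G1ParScanThreadState keeps its own AgeTable (see the following hunk), and its per-age sizes are accumulated into _survivors_age_table through merge(). A standalone sketch of that merge, with the table size and field names assumed for illustration:

#include <cstddef>

// Sketch of the per-age accumulation behind record_age_table() above.
// table_size and the field layout are stand-ins, not the real AgeTable.
namespace age_table_sketch {
  const int table_size = 16;   // assumed number of tracked ages

  struct AgeTableSketch {
    size_t sizes[table_size];

    AgeTableSketch() { for (int i = 0; i < table_size; i++) sizes[i] = 0; }

    void add(int age, size_t words) { sizes[age] += words; }

    // Accumulate another (per-worker) table into this one.
    void merge(const AgeTableSketch* other) {
      for (int i = 0; i < table_size; i++) {
        sizes[i] += other->sizes[i];
      }
    }
  };
}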

View File

@ -50,7 +50,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1PLABAllocator* _plab_allocator;
ageTable _age_table;
AgeTable _age_table;
InCSetState _dest[InCSetState::Num];
// Local tenuring threshold.
uint _tenuring_threshold;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -97,7 +97,7 @@ ScanRSClosure::ScanRSClosure(G1ParPushHeapRSClosure* oc,
_worker_i(worker_i),
_try_claimed(false) {
_g1h = G1CollectedHeap::heap();
_bot_shared = _g1h->bot_shared();
_bot = _g1h->bot();
_ct_bs = _g1h->g1_barrier_set();
_block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}
@ -109,7 +109,7 @@ void ScanRSClosure::scanCard(size_t index, HeapRegion *r) {
// Set the "from" region in the closure.
_oc->set_region(r);
MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
MemRegion mr = pre_gc_allocated.intersection(card_region);
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
@ -153,7 +153,7 @@ bool ScanRSClosure::doHeapRegion(HeapRegion* r) {
jump_to_card = hrrs->iter_claimed_next(_block_size);
}
if (current_card < jump_to_card) continue;
HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
HeapWord* card_start = _g1h->bot()->address_for_index(card_index);
HeapRegion* card_region = _g1h->heap_region_containing(card_start);
_cards++;
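The scanCard() change above keeps the same arithmetic under the new names: the BOT maps a card index back to a heap address, one card covers BOTConstants::N_words words, and only the part of the card that overlaps the space allocated before the pause (bottom() up to scan_top()) is scanned. A small standalone sketch of that clipping, using word indices and an assumed card size:

#include <algorithm>
#include <cstddef>

// Sketch of the card clipping in scanCard() above. kWordsPerCard and the
// word-indexed "addresses" are illustrative only.
namespace scan_card_sketch {
  const size_t kWordsPerCard = 64;   // assumed BOTConstants::N_words

  // Writes the start of the range to scan and returns its length in words.
  size_t clip_card_to_allocated(size_t card_index, size_t heap_bottom,
                                size_t scan_top, size_t* scan_start) {
    size_t card_start = heap_bottom + card_index * kWordsPerCard;
    size_t card_end   = card_start + kWordsPerCard;
    size_t lo = std::max(card_start, heap_bottom);
    size_t hi = std::min(card_end, scan_top);
    *scan_start = lo;
    return hi > lo ? hi - lo : 0;    // empty if the card is above scan_top
  }
}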

Some files were not shown because too many files have changed in this diff.