Lana Steuck 2013-11-25 09:27:14 -08:00
commit 4f265c99f7
1166 changed files with 74735 additions and 82032 deletions

View File

@ -237,3 +237,5 @@ ad67c34f79c28a8e755f4a49f313868619d6702c jdk8-b112
4a4dbcf7cb7d3e1a81beaa3b11cd909f69ebc79a jdk8-b113
dfa34ab293faad9b543a24646dbb381bc3ab5586 jdk8-b114
3dd9732b17034f45d111996d1d50287b05a3998c jdk8-b115
aaf663f591aba43ec942263b15ba62759ce26a1e jdk8-b116
31b0e03fcad73d7886b306b4c2e57ad270780d0d jdk8-b117

View File

@ -237,3 +237,5 @@ d086227bfc45d124f09b3bd72a07956b4073bf71 jdk8-b111
6ba4c7cb623ec612031e05cf8bf279d8f407bd1e jdk8-b113
4f2011496393a26dcfd7b1f7787a3673ddd32599 jdk8-b114
763ada2a1d8c5962bc8c3d297e57c562d2e95338 jdk8-b115
cbfe5da942c63ef865cab4a7159e01eff7d7fcf5 jdk8-b116
a4afb0a8d55ef75aef5b0d77b434070468fb89f8 jdk8-b117

View File

@ -237,3 +237,5 @@ a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
a259ff3e42d91da68f4d4f09d7eb9dc22bc024fc jdk8-b113
0bbccf77c23e566170b88b52c2cf28e5d31ce927 jdk8-b114
8d07115924b7d703a5048adb24e8aba751442f13 jdk8-b115
5fdc4465208933ba704825b2b05e1afd062235fb jdk8-b116
e53d1ee4d2ae898f1cf58688d45a5afe7c482173 jdk8-b117

View File

@ -393,3 +393,7 @@ f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
3b32d287da89a47a45d16f6d9ba5bd3cd9bf4b3e hs25-b57
9ebaac78a8a0061fb9597e07f806498cb626cdeb jdk8-b115
e510dfdec6dd701410f3398ed86ebcdff0cca63a hs25-b58
52b076e6ffae247c1c7d8b7aba995195be2b6fc2 jdk8-b116
c78d517c7ea47501b456e707afd4b78e7b5b202e hs25-b59
f573d00213b7170c2ff856f9cd83cd148437f5b9 jdk8-b117
abad3b2d905d9e1ad767c94baa94aba6ed5b207b hs25-b60

View File

@ -24,8 +24,9 @@
package sun.jvm.hotspot.tools;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.runtime.Arguments;
import sun.jvm.hotspot.runtime.VM;
public class JInfo extends Tool {
public JInfo() {
@ -138,14 +139,33 @@ public class JInfo extends Tool {
}
private void printVMFlags() {
VM.Flag[] flags = VM.getVM().getCommandLineFlags();
System.out.print("Non-default VM flags: ");
for (VM.Flag flag : flags) {
if (flag.getOrigin() == 0) {
// only print flags which aren't their defaults
continue;
}
if (flag.isBool()) {
String onoff = flag.getBool() ? "+" : "-";
System.out.print("-XX:" + onoff + flag.getName() + " ");
} else {
System.out.print("-XX:" + flag.getName() + "="
+ flag.getValue() + " ");
}
}
System.out.println();
System.out.print("Command line: ");
String str = Arguments.getJVMFlags();
if (str != null) {
System.out.println(str);
System.out.print(str + " ");
}
str = Arguments.getJVMArgs();
if (str != null) {
System.out.println(str);
System.out.print(str);
}
System.out.println();
}
private int mode;
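
The reworked printVMFlags() skips any flag whose origin is 0, i.e. one still at its default value, and formats booleans as -XX:+Name / -XX:-Name and everything else as -XX:Name=value. A rough analogue is available from ordinary Java through the platform management API; a minimal sketch, not SA code, and limited to the writeable diagnostic flags that getDiagnosticOptions() exposes rather than the full set the serviceability agent can read:

import com.sun.management.HotSpotDiagnosticMXBean;
import com.sun.management.VMOption;
import java.lang.management.ManagementFactory;

public class NonDefaultFlags {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        StringBuilder out = new StringBuilder("Non-default VM flags: ");
        for (VMOption opt : bean.getDiagnosticOptions()) {
            if (opt.getOrigin() == VMOption.Origin.DEFAULT) {
                continue; // same idea as the getOrigin() == 0 check above
            }
            String v = opt.getValue();
            if ("true".equals(v) || "false".equals(v)) {
                // boolean flags print as -XX:+Name / -XX:-Name
                out.append("-XX:").append("true".equals(v) ? '+' : '-')
                   .append(opt.getName()).append(' ');
            } else {
                out.append("-XX:").append(opt.getName()).append('=').append(v).append(' ');
            }
        }
        System.out.println(out);
    }
}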

View File

@ -25,11 +25,11 @@
package sun.jvm.hotspot.tools;
import java.io.PrintStream;
import java.util.Hashtable;
import sun.jvm.hotspot.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.HotSpotAgent;
import sun.jvm.hotspot.debugger.DebuggerException;
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.runtime.VM;
// generic command line or GUI tool.
// override run & code main as shown below.
@ -147,6 +147,7 @@ public abstract class Tool implements Runnable {
}
PrintStream err = System.err;
PrintStream out = System.out;
int pid = 0;
String coreFileName = null;
@ -180,18 +181,18 @@ public abstract class Tool implements Runnable {
try {
switch (debugeeType) {
case DEBUGEE_PID:
err.println("Attaching to process ID " + pid + ", please wait...");
out.println("Attaching to process ID " + pid + ", please wait...");
agent.attach(pid);
break;
case DEBUGEE_CORE:
err.println("Attaching to core " + coreFileName +
out.println("Attaching to core " + coreFileName +
" from executable " + executableName + ", please wait...");
agent.attach(executableName, coreFileName);
break;
case DEBUGEE_REMOTE:
err.println("Attaching to remote server " + remoteServer + ", please wait...");
out.println("Attaching to remote server " + remoteServer + ", please wait...");
agent.attach(remoteServer);
break;
}
@ -218,7 +219,7 @@ public abstract class Tool implements Runnable {
return 1;
}
err.println("Debugger attached successfully.");
out.println("Debugger attached successfully.");
startInternal();
return 0;
}
@ -237,14 +238,14 @@ public abstract class Tool implements Runnable {
// Remains of the start mechanism, common to both start methods.
private void startInternal() {
PrintStream err = System.err;
PrintStream out = System.out;
VM vm = VM.getVM();
if (vm.isCore()) {
err.println("Core build detected.");
out.println("Core build detected.");
} else if (vm.isClientCompiler()) {
err.println("Client compiler detected.");
out.println("Client compiler detected.");
} else if (vm.isServerCompiler()) {
err.println("Server compiler detected.");
out.println("Server compiler detected.");
} else {
throw new RuntimeException("Fatal error: "
+ "should have been able to detect core/C1/C2 build");
@ -252,8 +253,8 @@ public abstract class Tool implements Runnable {
String version = vm.getVMRelease();
if (version != null) {
err.print("JVM version is ");
err.println(version);
out.print("JVM version is ");
out.println(version);
}
run();
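
The pattern across this file: progress and status messages ("Attaching to process ID ...", "Debugger attached successfully.", the compiler and JVM version banners) move from System.err to System.out, so redirecting an SA-based tool's standard output captures the banner together with the tool's report, while stderr stays reserved for actual errors.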

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=58
HS_BUILD_NUMBER=60
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -1002,18 +1002,6 @@ void AdapterGenerator::gen_i2c_adapter(
// and the vm will find there should this case occur.
Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
__ st_ptr(G5_method, callee_target_addr);
if (StressNonEntrant) {
// Open a big window for deopt failure
__ save_frame(0);
__ mov(G0, L0);
Label loop;
__ bind(loop);
__ sub(L0, 1, L0);
__ br_null_short(L0, Assembler::pt, loop);
__ restore();
}
__ jmpl(G3, 0, G0);
__ delayed()->nop();
}

View File

@ -3001,6 +3001,10 @@ void SharedRuntime::generate_deopt_blob() {
// sp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
if (UseStackBanging) {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
@ -3020,9 +3024,6 @@ void SharedRuntime::generate_deopt_blob() {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(counter, rbx);
// Pick up the initial fp we should save
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
// frame and the stack walking of interpreter_sender will get the unextended sp
@ -3220,6 +3221,10 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// sp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
if (UseStackBanging) {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
@ -3240,9 +3245,6 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(counter, rbx);
// Pick up the initial fp we should save
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
// frame and the stack walking of interpreter_sender will get the unextended sp

View File

@ -3471,6 +3471,10 @@ void SharedRuntime::generate_deopt_blob() {
// rsp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
if (UseStackBanging) {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
@ -3489,9 +3493,6 @@ void SharedRuntime::generate_deopt_blob() {
// Load counter into rdx
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
// Pick up the initial fp we should save
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
// frame and the stack walking of interpreter_sender will get the unextended sp
@ -3663,6 +3664,10 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// rsp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
if (UseStackBanging) {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
@ -3670,27 +3675,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
}
// Load address of array of frame pcs into rcx (address*)
__ movptr(rcx,
Address(rdi,
Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
// Trash the return pc
__ addptr(rsp, wordSize);
// Load address of array of frame sizes into rsi (intptr_t*)
__ movptr(rsi, Address(rdi,
Deoptimization::UnrollBlock::
frame_sizes_offset_in_bytes()));
__ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
// Counter
__ movl(rdx, Address(rdi,
Deoptimization::UnrollBlock::
number_of_frames_offset_in_bytes())); // (int)
// Pick up the initial fp we should save
__ movptr(rbp,
Address(rdi,
Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
// Now adjust the caller's stack to make up for the extra locals but
// record the original sp so that we can save it in the skeletal
@ -3700,9 +3694,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
const Register sender_sp = r8;
__ mov(sender_sp, rsp);
__ movl(rbx, Address(rdi,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes())); // (int)
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
__ subptr(rsp, rbx);
// Push interpreter frames in a loop

View File

@ -4338,6 +4338,11 @@ void GraphBuilder::print_stats() {
#endif // PRODUCT
void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
// A default method's holder is an interface
if (known_holder != NULL && known_holder->is_interface()) {
assert(known_holder->is_instance_klass() && ((ciInstanceKlass*)known_holder)->has_default_methods(), "should be default method");
known_holder = NULL;
}
append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}
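
The new guard reflects that a default method's resolved holder is an interface, while any receiver actually observed at a call site is a concrete class, so receiver-type profiling drops the interface holder before emitting the ProfileCall. A source-level illustration with hypothetical types, not HotSpot code:

interface Shape {
    double area();
    default double unitArea() { return area(); } // this method's holder is the interface
}

class Circle implements Shape {
    public double area() { return Math.PI; }
}

public class ProfileHolderDemo {
    public static void main(String[] args) {
        Shape s = new Circle();
        // a profile at this call site only ever records concrete receiver
        // classes such as Circle, never the interface Shape itself
        System.out.println(s.unitArea());
    }
}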

View File

@ -2574,8 +2574,25 @@ void LIRGenerator::do_Goto(Goto* x) {
__ jump(x->default_sux());
}
ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
/**
* Emit profiling code if needed for arguments, parameters, return value types
*
* @param md MDO the code will update at runtime
* @param md_base_offset common offset in the MDO for this profile and subsequent ones
* @param md_offset offset in the MDO (on top of md_base_offset) for this profile
* @param profiled_k current profile
* @param obj IR node for the object to be profiled
* @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
* Set once we find an update to make and use for next ones.
* @param not_null true if we know obj cannot be null
* @param signature_at_call_k signature at call for obj
* @param callee_signature_k signature of callee for obj; at a method handle
* call, the call-site and callee signatures differ
* @return the only klass we know will ever be seen at this profile point
*/
ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
ciKlass* callee_signature_k) {
ciKlass* result = NULL;
bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
bool do_update = !TypeEntries::is_type_unknown(profiled_k);
@ -2590,9 +2607,9 @@ ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, in
if (do_update) {
// try to find exact type, using CHA if possible, so that loading
// the klass from the object can be avoided
ciType* type = arg->exact_type();
ciType* type = obj->exact_type();
if (type == NULL) {
type = arg->declared_type();
type = obj->declared_type();
type = comp->cha_exact_type(type);
}
assert(type == NULL || type->is_klass(), "type should be class");
@ -2608,23 +2625,33 @@ ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, in
ciKlass* exact_signature_k = NULL;
if (do_update) {
// Is the type from the signature exact (the only one possible)?
exact_signature_k = signature_k->exact_klass();
exact_signature_k = signature_at_call_k->exact_klass();
if (exact_signature_k == NULL) {
exact_signature_k = comp->cha_exact_type(signature_k);
exact_signature_k = comp->cha_exact_type(signature_at_call_k);
} else {
result = exact_signature_k;
do_update = false;
// Known statically. No need to emit any code: prevent
// LIR_Assembler::emit_profile_type() from emitting useless code
profiled_k = ciTypeEntries::with_status(result, profiled_k);
}
if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
assert(exact_klass == NULL, "arg and signature disagree?");
assert(exact_klass == NULL, "obj and signature disagree?");
// sometimes the type of the signature is better than the best type
// the compiler has
exact_klass = exact_signature_k;
do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
}
if (callee_signature_k != NULL &&
callee_signature_k != signature_at_call_k) {
ciKlass* improved_klass = callee_signature_k->exact_klass();
if (improved_klass == NULL) {
improved_klass = comp->cha_exact_type(callee_signature_k);
}
if (improved_klass != NULL && exact_klass != improved_klass) {
assert(exact_klass == NULL, "obj and signature disagree?");
exact_klass = exact_signature_k;
}
}
do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
}
if (!do_null && !do_update) {
@ -2640,7 +2667,7 @@ ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, in
__ leal(LIR_OprFact::address(base_type_address), mdp);
}
}
LIRItem value(arg, this);
LIRItem value(obj, this);
value.load_item();
__ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
@ -2665,9 +2692,9 @@ void LIRGenerator::profile_parameters(Base* x) {
if (t == T_OBJECT || t == T_ARRAY) {
intptr_t profiled_k = parameters->type(j);
Local* local = x->state()->local_at(java_index)->as_Local();
ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
profiled_k, local, mdp, false, local->declared_type()->as_klass());
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
// If the profile is known statically set it once for all and do not emit any code
if (exact != NULL) {
md->set_parameter_type(j, exact);
@ -3129,19 +3156,28 @@ void LIRGenerator::profile_arguments(ProfileCall* x) {
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
int start = 0;
int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
if (x->nb_profiled_args() < stop) {
// if called through method handle invoke, some arguments may have been popped
stop = x->nb_profiled_args();
if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
// first argument is not profiled at call (method handle invoke)
assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
start = 1;
}
ciSignature* sig = x->callee()->signature();
ciSignature* callee_signature = x->callee()->signature();
// method handle call to virtual method
bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
for (int i = 0; i < stop; i++) {
ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
ciSignatureStream signature_at_call_stream(signature_at_call);
// if called through method handle invoke, some arguments may have been popped
for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
ciKlass* exact = profile_arg_type(md, base_offset, off,
args->type(i), x->profiled_arg_at(i+start), mdp,
!x->arg_needs_null_check(i+start), sig_stream.next_klass());
ciKlass* exact = profile_type(md, base_offset, off,
args->type(i), x->profiled_arg_at(i+start), mdp,
!x->arg_needs_null_check(i+start),
signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
if (exact != NULL) {
md->set_argument_type(bci, i, exact);
}
@ -3176,8 +3212,8 @@ void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
int bci = x->bci_of_invoke();
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
// The first parameter is the receiver so that's what we start
// with if it exists. On exception if method handle call to
// virtual method has receiver in the args list
// with if it exists. One exception is method handle call to
// virtual method: the receiver is in the args list
if (arg == NULL || !Bytecodes::has_receiver(bc)) {
i = 1;
arg = x->profiled_arg_at(0);
@ -3186,9 +3222,9 @@ void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
int k = 0; // to iterate on the profile data
for (;;) {
intptr_t profiled_k = parameters->type(k);
ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
profiled_k, arg, mdp, not_null, sig_stream.next_klass());
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
// If the profile is known statically set it once for all and do not emit any code
if (exact != NULL) {
md->set_parameter_type(k, exact);
@ -3247,9 +3283,16 @@ void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
ret->type(), x->ret(), mdp,
!x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
bool ignored_will_link;
ciSignature* signature_at_call = NULL;
x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
ret->type(), x->ret(), mdp,
!x->needs_null_check(),
signature_at_call->return_type()->as_klass(),
x->callee()->signature()->return_type()->as_klass());
if (exact != NULL) {
md->set_return_type(bci, exact);
}

View File

@ -434,7 +434,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_ThreadIDIntrinsic(Intrinsic* x);
void do_ClassIDIntrinsic(Intrinsic* x);
#endif
ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
ciKlass* callee_signature_k);
void profile_arguments(ProfileCall* x);
void profile_parameters(Base* x);
void profile_parameters_at_call(ProfileCall* x);

View File

@ -341,9 +341,6 @@
diagnostic(bool, C1PatchInvokeDynamic, true, \
"Patch invokedynamic appendix not known at compile time") \
\
develop(intx, MaxForceInlineLevel, 100, \
"maximum number of nested @ForceInline calls that are inlined") \
\
// Read default values for c1 globals

View File

@ -935,7 +935,9 @@ void ciEnv::register_method(ciMethod* target,
// Prevent SystemDictionary::add_to_hierarchy from running
// and invalidating our dependencies until we install this method.
// No safepoints are allowed. Otherwise, class redefinition can occur in between.
MutexLocker ml(Compile_lock);
No_Safepoint_Verifier nsv;
// Change in Jvmti state may invalidate compilation.
if (!failing() &&
@ -1001,31 +1003,15 @@ void ciEnv::register_method(ciMethod* target,
// Free codeBlobs
code_buffer->free_blob();
// stress test 6243940 by immediately making the method
// non-entrant behind the system's back. This has serious
// side effects on the code cache and is not meant for
// general stress testing
if (nm != NULL && StressNonEntrant) {
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
}
if (nm == NULL) {
// The CodeCache is full. Print out warning and disable compilation.
record_failure("code cache is full");
{
MutexUnlocker ml(Compile_lock);
MutexUnlocker locker(MethodCompileQueue_lock);
CompileBroker::handle_full_code_cache();
}
} else {
if (nm != NULL) {
nm->set_has_unsafe_access(has_unsafe_access);
nm->set_has_wide_vectors(has_wide_vectors);
// Record successful registration.
// (Put nm into the task handle *before* publishing to the Java heap.)
if (task() != NULL) task()->set_code(nm);
if (task() != NULL) {
task()->set_code(nm);
}
if (entry_bci == InvocationEntryBci) {
if (TieredCompilation) {
@ -1036,11 +1022,11 @@ void ciEnv::register_method(ciMethod* target,
char *method_name = method->name_and_sig_as_C_string();
tty->print_cr("Replacing method %s", method_name);
}
if (old != NULL ) {
if (old != NULL) {
old->make_not_entrant();
}
}
if (TraceNMethodInstalls ) {
if (TraceNMethodInstalls) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();
ttyLocker ttyl;
@ -1051,7 +1037,7 @@ void ciEnv::register_method(ciMethod* target,
// Allow the code to be executed
method->set_code(method, nm);
} else {
if (TraceNMethodInstalls ) {
if (TraceNMethodInstalls) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();
ttyLocker ttyl;
@ -1061,15 +1047,18 @@ void ciEnv::register_method(ciMethod* target,
entry_bci);
}
method->method_holder()->add_osr_nmethod(nm);
}
}
}
// JVMTI -- compiled method notification (must be done outside lock)
if (nm != NULL) {
nm->post_compiled_method_load_event();
}
} // safepoints are allowed again
if (nm != NULL) {
// JVMTI -- compiled method notification (must be done outside lock)
nm->post_compiled_method_load_event();
} else {
// The CodeCache is full. Print out warning and disable compilation.
record_failure("code cache is full");
CompileBroker::handle_full_code_cache();
}
}
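
The restructuring keeps nmethod installation under Compile_lock (now backed by a No_Safepoint_Verifier) but moves both the JVMTI load notification and the code-cache-full fallback outside the locked region. A generic Java sketch of that lock-then-notify shape, with hypothetical names standing in for the HotSpot pieces:

import java.util.concurrent.locks.ReentrantLock;

final class RegisterThenNotify {
    private final ReentrantLock compileLock = new ReentrantLock();
    private Object installed; // stand-in for the installed nmethod

    void register(Object candidate) { // candidate may be null if the cache is full
        Object nm;
        compileLock.lock();
        try {
            // publish under the lock, as register_method installs the nmethod
            installed = nm = candidate;
        } finally {
            compileLock.unlock();
        }
        // notification and heavyweight fallback run outside the lock, mirroring
        // post_compiled_method_load_event() and handle_full_code_cache()
        if (nm != null) {
            notifyLoaded(nm);
        } else {
            handleFullCache();
        }
    }

    private void notifyLoaded(Object nm) { System.out.println("loaded: " + nm); }
    private void handleFullCache() { System.err.println("code cache is full"); }
}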

View File

@ -77,7 +77,9 @@ public:
static ciKlass* valid_ciklass(intptr_t k) {
if (!TypeEntries::is_type_none(k) &&
!TypeEntries::is_type_unknown(k)) {
return (ciKlass*)TypeEntries::klass_part(k);
ciKlass* res = (ciKlass*)TypeEntries::klass_part(k);
assert(res != NULL, "invalid");
return res;
} else {
return NULL;
}

View File

@ -4080,7 +4080,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// Generate any default methods - default methods are interface methods
// that have a default implementation. This is new with Lambda project.
if (has_default_methods && !access_flags.is_interface() ) {
if (has_default_methods ) {
DefaultMethods::generate_default_methods(
this_klass(), &all_mirandas, CHECK_(nullHandle));
}

View File

@ -171,8 +171,12 @@ class HierarchyVisitor : StackObj {
}
bool is_cancelled() const { return _cancelled; }
// This code used to skip interface classes because their only
// superclass was j.l.Object which would be also covered by class
// superclass hierarchy walks. Now that the starting point can be
// an interface, we must ensure we catch j.l.Object as the super.
static bool has_super(InstanceKlass* cls) {
return cls->super() != NULL && !cls->is_interface();
return cls->super() != NULL;
}
Node* node_at_depth(int i) const {
@ -391,16 +395,21 @@ class MethodFamily : public ResourceObj {
return;
}
// Qualified methods are maximally-specific methods
// These include public, instance concrete (=default) and abstract methods
GrowableArray<Method*> qualified_methods;
int num_defaults = 0;
int default_index = -1;
int qualified_index = -1;
for (int i = 0; i < _members.length(); ++i) {
Pair<Method*,QualifiedState> entry = _members.at(i);
if (entry.second == QUALIFIED) {
qualified_methods.append(entry.first);
default_index++;
qualified_index++;
if (entry.first->is_default_method()) {
num_defaults++;
default_index = qualified_index;
}
}
}
@ -408,16 +417,10 @@ class MethodFamily : public ResourceObj {
if (qualified_methods.length() == 0) {
_exception_message = generate_no_defaults_message(CHECK);
_exception_name = vmSymbols::java_lang_AbstractMethodError();
} else if (qualified_methods.length() == 1) {
// leave abstract methods alone, they will be found via normal search path
Method* method = qualified_methods.at(0);
if (!method->is_abstract()) {
_selected_target = qualified_methods.at(0);
}
// If only one qualified method is default, select that
} else if (num_defaults == 1) {
_selected_target = qualified_methods.at(default_index);
} else {
} else if (num_defaults > 1) {
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
if (TraceDefaultMethods) {
@ -425,6 +428,7 @@ class MethodFamily : public ResourceObj {
tty->print_cr("");
}
}
// leave abstract methods alone, they will be found via normal search path
}
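
The selection rules read straight off the counters: zero qualified methods yields AbstractMethodError, exactly one default method is selected, several defaults yield IncompatibleClassChangeError, and abstract methods are left to the normal search path. javac rejects the ambiguous case at compile time, so this VM logic matters for classes compiled before a conflicting default appeared; a source-level sketch:

interface A { default void m() { System.out.println("A.m"); } }
interface B { default void m() { System.out.println("B.m"); } }

// With two inherited defaults, javac refuses to compile C unless it
// overrides m(); had C been compiled before B gained its default, the
// selection above would throw IncompatibleClassChangeError on invocation.
class C implements A, B {
    public void m() { A.super.m(); } // the explicit way to break the tie
}

public class DefaultConflictDemo {
    public static void main(String[] args) {
        new C().m(); // prints "A.m"
    }
}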
bool contains_signature(Symbol* query) {
@ -704,8 +708,10 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
Method* m = iklass->find_method(_method_name, _method_signature);
// private interface methods are not candidates for default methods
// invokespecial to private interface methods doesn't use default method logic
// The overpasses are your supertypes' errors; we do not include them
// future: take access controls into account for superclass methods
if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
if (m != NULL && !m->is_static() && !m->is_overpass() &&
(!iklass->is_interface() || m->is_public())) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
@ -781,7 +787,8 @@ void DefaultMethods::generate_default_methods(
#ifndef PRODUCT
if (TraceDefaultMethods) {
ResourceMark rm; // be careful with these!
tty->print_cr("Class %s requires default method processing",
tty->print_cr("%s %s requires default method processing",
klass->is_interface() ? "Interface" : "Class",
klass->name()->as_klass_external_name());
PrintHierarchy printer;
printer.run(klass);
@ -806,7 +813,7 @@ void DefaultMethods::generate_default_methods(
}
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Creating overpasses...");
tty->print_cr("Creating defaults and overpasses...");
}
#endif // ndef PRODUCT
@ -1076,7 +1083,9 @@ static void merge_in_new_methods(InstanceKlass* klass,
klass->set_initial_method_idnum(new_size);
ClassLoaderData* cld = klass->class_loader_data();
MetadataFactory::free_array(cld, original_methods);
if (original_methods->length() > 0) {
MetadataFactory::free_array(cld, original_methods);
}
if (original_ordering->length() > 0) {
klass->set_method_ordering(merged_ordering);
MetadataFactory::free_array(cld, original_ordering);

View File

@ -30,6 +30,7 @@
#include "prims/jvmtiImpl.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "services/threadService.hpp"
#include "utilities/growableArray.hpp"
@ -50,6 +51,7 @@ MetadataOnStackMark::MetadataOnStackMark() {
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);
}
MetadataOnStackMark::~MetadataOnStackMark() {

View File

@ -141,7 +141,6 @@ class SymbolPropertyTable;
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \

View File

@ -188,10 +188,8 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
Symbol* name = klass->name();
Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
return (should_verify_for(klass->class_loader(), should_verify_class) &&
// return if the class is a bootstrapping class
@ -215,9 +213,7 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou
// NOTE: this is called too early in the bootstrapping process to be
// guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
// Also for lambda generated code, gte jdk8
(!is_reflect || VerifyReflectionBytecodes) &&
(!is_lambda || VerifyLambdaBytecodes)
);
(!is_reflect || VerifyReflectionBytecodes));
}
Symbol* Verifier::inference_verify(

View File

@ -273,7 +273,6 @@
template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \
template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \

View File

@ -618,21 +618,18 @@ nmethod* nmethod::new_nmethod(methodHandle method,
// record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
}
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
if (PrintAssembly && nm != NULL) {
Disassembler::decode(nm);
NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
if (PrintAssembly) {
Disassembler::decode(nm);
}
}
}
// verify nmethod
debug_only(if (nm) nm->verify();) // might block
// Do verification and logging outside CodeCache_lock.
if (nm != NULL) {
// Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
DEBUG_ONLY(nm->verify();)
nm->log_new_nmethod();
}
// done
return nm;
}
@ -1262,7 +1259,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::notify();
NMethodSweeper::report_state_change(this);
}
void nmethod::invalidate_osr_method() {
@ -1296,7 +1293,9 @@ void nmethod::log_state_change() const {
}
}
// Common functionality for both make_not_entrant and make_zombie
/**
* Common functionality for both make_not_entrant and make_zombie
*/
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
assert(!is_zombie(), "should not already be a zombie");
@ -1420,9 +1419,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
}
// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify();
NMethodSweeper::report_state_change(this);
return true;
}
@ -2395,20 +2392,23 @@ void nmethod::verify() {
void nmethod::verify_interrupt_point(address call_site) {
// This code does not work in release mode since
// owns_lock is only available in debug mode.
CompiledIC* ic = NULL;
Thread *cur = Thread::current();
if (CompiledIC_lock->owner() == cur ||
((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
SafepointSynchronize::is_at_safepoint())) {
ic = CompiledIC_at(this, call_site);
CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
} else {
MutexLocker ml_verify (CompiledIC_lock);
ic = CompiledIC_at(this, call_site);
// Verify IC only when nmethod installation is finished.
bool is_installed = (method()->code() == this) // nmethod is in state 'alive' and installed
|| !this->is_in_use(); // nmethod is installed, but not in 'alive' state
if (is_installed) {
Thread *cur = Thread::current();
if (CompiledIC_lock->owner() == cur ||
((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
SafepointSynchronize::is_at_safepoint())) {
CompiledIC_at(this, call_site);
CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
} else {
MutexLocker ml_verify (CompiledIC_lock);
CompiledIC_at(this, call_site);
}
}
PcDesc* pd = pc_desc_at(ic->end_of_call());
PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute(),

View File

@ -126,6 +126,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile jint CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
// The installed compiler(s)
@ -2027,11 +2028,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
#endif
}
// ------------------------------------------------------------------
// CompileBroker::handle_full_code_cache
//
// The CodeCache is full. Print out warning and disable compilation or
// try code cache cleaning so compilation can continue later.
/**
* The CodeCache is full. Print out warning and disable compilation
* or try code cache cleaning so compilation can continue later.
*/
void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
@ -2048,12 +2048,9 @@ void CompileBroker::handle_full_code_cache() {
xtty->stamp();
xtty->end_elem();
}
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
CodeCache::report_codemem_full();
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
@ -2066,17 +2063,22 @@ void CompileBroker::handle_full_code_cache() {
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
// Switch to 'vm_state'. This ensures that possibly_sweep() can be called
// without having to consider the state in which the current thread is.
ThreadInVMfromUnknown in_vm;
NMethodSweeper::possibly_sweep();
}
// Switch to 'vm_state'. This ensures that possibly_sweep() can be called
// without having to consider the state in which the current thread is.
ThreadInVMfromUnknown in_vm;
NMethodSweeper::possibly_sweep();
} else {
disable_compilation_forever();
}
// Print warning only once
if (should_print_compiler_warning()) {
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
codecache_print(/* detailed= */ true);
}
}
codecache_print(/* detailed= */ true);
}
// ------------------------------------------------------------------

View File

@ -315,6 +315,8 @@ class CompileBroker: AllStatic {
static int _sum_nmethod_code_size;
static long _peak_compilation_time;
static volatile jint _print_compilation_warning;
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
@ -418,7 +420,11 @@ class CompileBroker: AllStatic {
return _should_compile_new_jobs == shutdown_compilaton;
}
static void handle_full_code_cache();
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {
jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
return old == 0;
}
// Return total compilation ticks
static jlong total_compilation_ticks() {
return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
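
should_print_compiler_warning() claims the warning via an atomic 0-to-1 compare-and-exchange, so exactly one thread prints the code-cache-full message no matter how many race into handle_full_code_cache(). The same idiom in plain Java, as a sketch:

import java.util.concurrent.atomic.AtomicBoolean;

public class WarnOnce {
    private static final AtomicBoolean warned = new AtomicBoolean(false);

    static void reportCodeCacheFull() {
        // compareAndSet plays the role of Atomic::cmpxchg(1, &flag, 0):
        // only the first caller to flip false -> true gets to print
        if (warned.compareAndSet(false, true)) {
            System.err.println("CodeCache is full. Compiler has been disabled.");
            System.err.println("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
        }
    }

    public static void main(String[] args) {
        for (int i = 0; i < 4; i++) {
            new Thread(WarnOnce::reportCodeCacheFull).start();
        }
        // whatever the interleaving, the warning appears exactly once
    }
}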

View File

@ -47,8 +47,9 @@
// ConcurrentMarkSweepPolicy methods
//
ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() {
initialize_all();
void ConcurrentMarkSweepPolicy::initialize_alignments() {
_space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}
void ConcurrentMarkSweepPolicy::initialize_generations() {

View File

@ -29,10 +29,11 @@
class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
protected:
void initialize_alignments();
void initialize_generations();
public:
ConcurrentMarkSweepPolicy();
ConcurrentMarkSweepPolicy() {}
ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }

View File

@ -594,9 +594,9 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_completed_initialization(false),
_collector_policy(cp),
_should_unload_classes(false),
_should_unload_classes(CMSClassUnloadingEnabled),
_concurrent_cycles_since_last_unload(0),
_roots_scanning_options(0),
_roots_scanning_options(SharedHeap::SO_None),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
@ -788,14 +788,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
&& _survivor_chunk_index == 0),
"Error");
// Choose what strong roots should be scanned depending on verification options
if (!CMSClassUnloadingEnabled) {
// If class unloading is disabled we want to include all classes into the root set.
add_root_scanning_option(SharedHeap::SO_AllClasses);
} else {
add_root_scanning_option(SharedHeap::SO_SystemClasses);
}
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
_completed_initialization = true;
@ -2532,6 +2524,9 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
// Snapshot the soft reference policy to be used in this collection cycle.
ref_processor()->setup_policy(clear_all_soft_refs);
// Decide if class unloading should be done
update_should_unload_classes();
bool init_mark_was_synchronous = false; // until proven otherwise
while (_collectorState != Idling) {
if (TraceCMSState) {
@ -3310,7 +3305,10 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
|| VerifyBeforeExit;
const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
// We set the proper root for this CMS cycle here.
if (should_unload_classes()) { // Should unload classes this cycle
remove_root_scanning_option(SharedHeap::SO_AllClasses);
add_root_scanning_option(SharedHeap::SO_SystemClasses);
remove_root_scanning_option(rso); // Shrink the root set appropriately
set_verifying(should_verify); // Set verification state for this cycle
return; // Nothing else needs to be done at this time
@ -3318,6 +3316,9 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
// Not unloading classes this cycle
assert(!should_unload_classes(), "Inconsitency!");
remove_root_scanning_option(SharedHeap::SO_SystemClasses);
add_root_scanning_option(SharedHeap::SO_AllClasses);
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
// Include symbols, strings and code cache elements to prevent their resurrection.
add_root_scanning_option(rso);

View File

@ -2008,7 +2008,7 @@ jint G1CollectedHeap::initialize() {
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
size_t max_byte_size = collector_policy()->max_heap_byte_size();
size_t heap_alignment = collector_policy()->max_alignment();
size_t heap_alignment = collector_policy()->heap_alignment();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
@ -6656,13 +6656,18 @@ class RegisterNMethodOopClosure: public OopClosure {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
assert(!hr->isHumongous(), "code root in humongous region?");
assert(!hr->continuesHumongous(),
err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
// HeapRegion::add_strong_code_root() avoids adding duplicate
// entries but having duplicates is OK since we "mark" nmethods
// as visited when we scan the strong code root lists during the GC.
hr->add_strong_code_root(_nm);
assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr)));
}
}
@ -6683,9 +6688,15 @@ class UnregisterNMethodOopClosure: public OopClosure {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
assert(!hr->isHumongous(), "code root in humongous region?");
assert(!hr->continuesHumongous(),
err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
hr->remove_strong_code_root(_nm);
assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr)));
}
}
@ -6716,7 +6727,9 @@ void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion *hr) {
assert(!hr->isHumongous(), "humongous region in collection set?");
assert(!hr->isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
HR_FORMAT_PARAMS(hr)));
hr->migrate_strong_code_roots();
return false;
}
@ -6796,9 +6809,13 @@ public:
bool doHeapRegion(HeapRegion *hr) {
HeapRegionRemSet* hrrs = hr->rem_set();
if (hr->isHumongous()) {
// Code roots should never be attached to a humongous region
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
if (hr->continuesHumongous()) {
// Code roots should never be attached to a continuation of a humongous region
assert(hrrs->strong_code_roots_list_length() == 0,
err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
" starting at "HR_FORMAT", but has "INT32_FORMAT,
HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
hrrs->strong_code_roots_list_length()));
return false;
}

View File

@ -313,27 +313,38 @@ G1CollectorPolicy::G1CollectorPolicy() :
// for the first time during initialization.
_reserve_regions = 0;
initialize_all();
_collectionSetChooser = new CollectionSetChooser();
_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
void G1CollectorPolicy::initialize_alignments() {
_space_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
_heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}
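
A worked instance of that MAX3, with illustrative numbers: given the default 1M HeapRegion::GrainBytes, 4K small pages, and a card-table constraint at or below 1M, the region size dominates and the heap alignment is 1M; enabling -XX:+UseLargePages with 2M large pages makes page_size the maximum and raises the heap alignment to 2M.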
void G1CollectorPolicy::initialize_flags() {
_min_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
_max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
if (G1HeapRegionSize != HeapRegion::GrainBytes) {
FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
}
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
CollectorPolicy::initialize_flags();
_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");
void G1CollectorPolicy::post_heap_initialize() {
uintx max_regions = G1CollectedHeap::heap()->max_regions();
size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
if (max_young_size != MaxNewSize) {
FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
}
}
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
_min_desired_young_length(0), _max_desired_young_length(0) {
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
@ -344,8 +355,13 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
}
}
if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) {
vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size");
if (NewSize > MaxNewSize) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
"A new max generation size of " SIZE_FORMAT "k will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
MaxNewSize = NewSize;
}
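
Concretely, a command line such as -XX:NewSize=2g -XX:MaxNewSize=1g used to abort startup with "Initial young gen size set larger than the maximum young gen size"; with this change it warns "NewSize (2097152k) is greater than the MaxNewSize (1048576k). A new max generation size of 2097152k will be used." and continues with MaxNewSize raised to match NewSize.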
if (FLAG_IS_CMDLINE(NewSize)) {
@ -378,34 +394,48 @@ uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regio
return MAX2(1U, default_value);
}
void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
assert(new_number_of_heap_regions > 0, "Heap must be initialized");
void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
assert(number_of_heap_regions > 0, "Heap must be initialized");
switch (_sizer_kind) {
case SizerDefaults:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
*min_young_length = calculate_default_min_length(number_of_heap_regions);
*max_young_length = calculate_default_max_length(number_of_heap_regions);
break;
case SizerNewSizeOnly:
_max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
_max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
*max_young_length = calculate_default_max_length(number_of_heap_regions);
*max_young_length = MAX2(*min_young_length, *max_young_length);
break;
case SizerMaxNewSizeOnly:
_min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
_min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
*min_young_length = calculate_default_min_length(number_of_heap_regions);
*min_young_length = MIN2(*min_young_length, *max_young_length);
break;
case SizerMaxAndNewSize:
// Do nothing. Values set on the command line, don't update them at runtime.
break;
case SizerNewRatio:
_min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
_max_desired_young_length = _min_desired_young_length;
*min_young_length = number_of_heap_regions / (NewRatio + 1);
*max_young_length = *min_young_length;
break;
default:
ShouldNotReachHere();
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}
uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
// We need to pass the desired values because recalculation may not update these
// values in some cases.
uint temp = _min_desired_young_length;
uint result = _max_desired_young_length;
recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
return result;
}
void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
&_max_desired_young_length);
}
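
Threading min/max through out-parameters lets max_young_length() ask what the bounds would be for a given region count without clobbering the stored fields, which matters for SizerMaxAndNewSize, where recalculation deliberately leaves the command-line values alone. A condensed Java sketch of the same dispatch; names are illustrative, and the default-length heuristics stand in for calculate_default_min/max_length() using the jdk8 defaults G1NewSizePercent=5 and G1MaxNewSizePercent=60:

final class YoungGenSizing {
    enum Kind { DEFAULTS, NEW_SIZE_ONLY, MAX_NEW_SIZE_ONLY, MAX_AND_NEW_SIZE, NEW_RATIO }

    // returns {min, max} in regions; curMin/curMax carry the command-line values
    static long[] recalc(Kind kind, long regions, long curMin, long curMax, int newRatio) {
        long min = curMin, max = curMax;
        switch (kind) {
            case DEFAULTS:
                min = defaultMin(regions);
                max = defaultMax(regions);
                break;
            case NEW_SIZE_ONLY:      // NewSize pinned, only the max floats
                max = Math.max(min, defaultMax(regions));
                break;
            case MAX_NEW_SIZE_ONLY:  // MaxNewSize pinned, only the min floats
                min = Math.min(defaultMin(regions), max);
                break;
            case MAX_AND_NEW_SIZE:   // both pinned on the command line: no-op
                break;
            case NEW_RATIO:
                min = max = regions / (newRatio + 1);
                break;
        }
        assert min <= max : "invalid min/max young gen size values";
        return new long[] { min, max };
    }

    static long defaultMin(long regions) { return Math.max(1, regions * 5 / 100); }  // G1NewSizePercent
    static long defaultMax(long regions) { return Math.max(1, regions * 60 / 100); } // G1MaxNewSizePercent
}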
void G1CollectorPolicy::init() {

View File

@ -136,8 +136,16 @@ private:
uint calculate_default_min_length(uint new_number_of_heap_regions);
uint calculate_default_max_length(uint new_number_of_heap_regions);
// Update the given values for minimum and maximum young gen length in regions
// given the number of heap regions depending on the kind of sizing algorithm.
void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
public:
G1YoungGenSizer();
// Calculate the maximum length of the young gen given the number of regions
// depending on the sizing algorithm.
uint max_young_length(uint number_of_heap_regions);
void heap_size_changed(uint new_number_of_heap_regions);
uint min_desired_young_length() {
return _min_desired_young_length;
@ -165,13 +173,9 @@ private:
G1MMUTracker* _mmu_tracker;
void initialize_alignments();
void initialize_flags();
void initialize_all() {
initialize_flags();
initialize_size_info();
}
CollectionSetChooser* _collectionSetChooser;
double _full_collection_start_sec;
@ -217,7 +221,6 @@ private:
return _during_marking;
}
private:
enum PredictionConstants {
TruncatedSeqLength = 10
};
@ -665,8 +668,6 @@ public:
BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
// Record the start and end of an evacuation pause.
@ -934,6 +935,7 @@ public:
// Calculates survivor space parameters.
void update_survivors_policy();
virtual void post_heap_initialize();
};
// This should move to some place more general...

View File

@ -377,11 +377,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
dcqs.concatenate_logs();
if (G1CollectedHeap::use_parallel_gc_threads()) {
// Don't set the number of workers here. It will be set
// when the task is run
// _seq_task->set_n_termination((int)n_workers());
}
guarantee( _cards_scanned == NULL, "invariant" );
_cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); ++i) {

View File

@ -174,11 +174,6 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
region_size = MAX_REGION_SIZE;
}
if (region_size != G1HeapRegionSize) {
// Update the flag to make sure that PrintFlagsFinal logs the correct value
FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
}
// And recalculate the log.
region_size_log = log2_long((jlong) region_size);
@ -606,7 +601,9 @@ void HeapRegion::remove_strong_code_root(nmethod* nm) {
void HeapRegion::migrate_strong_code_roots() {
assert(in_collection_set(), "only collection set regions");
assert(!isHumongous(), "not humongous regions");
assert(!isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
HR_FORMAT_PARAMS(this)));
HeapRegionRemSet* hrrs = rem_set();
hrrs->migrate_strong_code_roots();
@ -727,12 +724,11 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
return;
}
// An H-region should have an empty strong code root list
if (isHumongous()) {
if (continuesHumongous()) {
if (strong_code_roots_length > 0) {
gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
"but has "INT32_FORMAT" code root entries",
bottom(), end(), strong_code_roots_length);
gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
"region but has "INT32_FORMAT" code root entries",
HR_FORMAT_PARAMS(this), strong_code_roots_length);
*failures = true;
}
return;

View File

@ -1004,7 +1004,9 @@ public:
void HeapRegionRemSet::migrate_strong_code_roots() {
assert(hr()->in_collection_set(), "only collection set regions");
assert(!hr()->isHumongous(), "not humongous regions");
assert(!hr()->isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
HR_FORMAT_PARAMS(hr())));
ResourceMark rm;

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
// If boundary moving is being used, create the young gen and old
@ -32,15 +33,17 @@
// the old behavior otherwise (with PSYoungGen and PSOldGen).
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
size_t init_low_byte_size,
size_t min_low_byte_size,
size_t max_low_byte_size,
size_t init_high_byte_size,
size_t min_high_byte_size,
size_t max_high_byte_size,
GenerationSizer* policy,
size_t alignment) :
_virtual_spaces(old_young_rs, min_low_byte_size,
min_high_byte_size, alignment) {
_virtual_spaces(old_young_rs, policy->min_gen1_size(),
policy->min_gen0_size(), alignment) {
size_t init_low_byte_size = policy->initial_gen1_size();
size_t min_low_byte_size = policy->min_gen1_size();
size_t max_low_byte_size = policy->max_gen1_size();
size_t init_high_byte_size = policy->initial_gen0_size();
size_t min_high_byte_size = policy->min_gen0_size();
size_t max_high_byte_size = policy->max_gen0_size();
assert(min_low_byte_size <= init_low_byte_size &&
init_low_byte_size <= max_low_byte_size, "Parameter check");
assert(min_high_byte_size <= init_high_byte_size &&

View File

@ -28,6 +28,7 @@
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/asPSOldGen.hpp"
#include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
// Contains two generations that both use an AdjoiningVirtualSpaces.
@ -56,14 +57,7 @@ class AdjoiningGenerations : public CHeapObj<mtGC> {
bool request_young_gen_expansion(size_t desired_change_in_bytes);
public:
AdjoiningGenerations(ReservedSpace rs,
size_t init_low_byte_size,
size_t min_low_byte_size,
size_t max_low_byte_size,
size_t init_high_byte_size,
size_t min_high_byte_size,
size_t max_high_bytes_size,
size_t alignment);
AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
// Accessors
PSYoungGen* young_gen() { return _young_gen; }

View File

@ -54,7 +54,6 @@ ASPSOldGen::ASPSOldGen(size_t initial_size,
int level) :
PSOldGen(initial_size, min_size, size_limit, gen_name, level),
_gen_size_limit(size_limit)
{}
ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
@ -65,13 +64,11 @@ ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
int level) :
PSOldGen(initial_size, min_size, size_limit, gen_name, level),
_gen_size_limit(size_limit)
{
_virtual_space = vs;
}
void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
PSOldGen::initialize_work(perf_data_name, level);
// The old gen can grow to gen_size_limit(). _reserve reflects only
@ -94,7 +91,7 @@ size_t ASPSOldGen::available_for_expansion() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t result = gen_size_limit() - virtual_space()->committed_size();
size_t result_aligned = align_size_down(result, heap->old_gen_alignment());
size_t result_aligned = align_size_down(result, heap->generation_alignment());
return result_aligned;
}
@ -105,7 +102,7 @@ size_t ASPSOldGen::available_for_contraction() {
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t gen_alignment = heap->old_gen_alignment();
const size_t gen_alignment = heap->generation_alignment();
PSAdaptiveSizePolicy* policy = heap->size_policy();
const size_t working_size =
used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();

View File

@ -70,13 +70,12 @@ void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
}
size_t ASPSYoungGen::available_for_expansion() {
size_t current_committed_size = virtual_space()->committed_size();
assert((gen_size_limit() >= current_committed_size),
"generation size limit is wrong");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t result = gen_size_limit() - current_committed_size;
size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
size_t result_aligned = align_size_down(result, heap->generation_alignment());
return result_aligned;
}
@ -85,7 +84,6 @@ size_t ASPSYoungGen::available_for_expansion() {
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
size_t uncommitted_bytes = virtual_space()->uncommitted_size();
if (uncommitted_bytes != 0) {
return uncommitted_bytes;
@ -94,8 +92,8 @@ size_t ASPSYoungGen::available_for_contraction() {
if (eden_space()->is_empty()) {
// Respect the minimum size for eden and for the young gen as a whole.
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t eden_alignment = heap->intra_heap_alignment();
const size_t gen_alignment = heap->young_gen_alignment();
const size_t eden_alignment = heap->space_alignment();
const size_t gen_alignment = heap->generation_alignment();
assert(eden_space()->capacity_in_bytes() >= eden_alignment,
"Alignment is wrong");
@ -121,7 +119,6 @@ size_t ASPSYoungGen::available_for_contraction() {
gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
}
return result_aligned;
}
return 0;
@ -132,7 +129,7 @@ size_t ASPSYoungGen::available_for_contraction() {
// to_space can be.
size_t ASPSYoungGen::available_to_live() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const size_t alignment = heap->space_alignment();
// Include any space that is committed but is not in eden.
size_t available = pointer_delta(eden_space()->bottom(),
@ -296,7 +293,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
assert(eden_start < from_start, "Cannot push into from_space");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "memory/collectorPolicy.hpp"
void GenerationSizer::trace_gen_sizes(const char* const str) {
if (TracePageSizes) {
tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
SIZE_FORMAT "," SIZE_FORMAT " "
SIZE_FORMAT,
str,
_min_gen1_size / K, _max_gen1_size / K,
_min_gen0_size / K, _max_gen0_size / K,
_max_heap_byte_size / K);
}
}
void GenerationSizer::initialize_alignments() {
_space_alignment = _gen_alignment = default_gen_alignment();
_heap_alignment = compute_heap_alignment();
}
void GenerationSizer::initialize_flags() {
// Do basic sizing work
TwoGenerationCollectorPolicy::initialize_flags();
assert(UseSerialGC ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
(ParallelGCThreads > 0),
"ParallelGCThreads should be set before flag initialization");
// The survivor ratios are calculated "raw", unlike the
// default gc, which adds 2 to the ratio value. We need to
// make sure the values are valid before using them.
if (MinSurvivorRatio < 3) {
FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3);
}
if (InitialSurvivorRatio < 3) {
FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
}
}
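
A standalone arithmetic sketch of why 3 is the floor for a "raw" ratio (the 96M young gen is an assumed value; this is illustration, not HotSpot code): each survivor space gets young_size / ratio, and eden is what remains after two survivors, so a ratio of 2 leaves eden empty.

#include <cstdio>
#include <cstddef>

int main() {
  const size_t young = 96 * 1024 * 1024;  // hypothetical young gen size
  for (size_t ratio = 2; ratio <= 4; ++ratio) {
    size_t survivor = young / ratio;      // "raw" ratio: no +2 adjustment
    long long eden = (long long)young - 2 * (long long)survivor;
    printf("ratio=%zu survivor=%lldM eden=%lldM\n", ratio,
           (long long)(survivor >> 20), eden >> 20);
  }
  // ratio=2: survivors take 48M + 48M and eden gets 0M, hence the clamp to >= 3.
  return 0;
}
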
void GenerationSizer::initialize_size_info() {
trace_gen_sizes("ps heap raw");
const size_t page_sz = os::page_size_for_region(_min_heap_byte_size,
_max_heap_byte_size,
8);
// Can a page size be anything other than a power of two?
assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
size_t new_alignment = round_to(page_sz, _gen_alignment);
if (new_alignment != _gen_alignment) {
_gen_alignment = new_alignment;
_space_alignment = new_alignment;
// Redo everything from the start
initialize_flags();
}
TwoGenerationCollectorPolicy::initialize_size_info();
trace_gen_sizes("ps heap rnd");
}
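
The round_to above bumps the generation alignment up to a multiple of the page size that was just selected. A minimal version of that helper under the stated power-of-two assumption (these are not the HotSpot utilities):

#include <cassert>
#include <cstddef>
#include <cstdio>

static bool is_power_of_2(size_t v) { return v != 0 && (v & (v - 1)) == 0; }

// Round v up to a multiple of alignment; alignment must be a power of two.
static size_t round_up(size_t v, size_t alignment) {
  assert(is_power_of_2(alignment));
  return (v + alignment - 1) & ~(alignment - 1);
}

int main() {
  size_t gen_alignment = 64 * 1024;      // assumed initial 64K alignment
  size_t page_sz = 2 * 1024 * 1024;      // e.g. a 2M large page was chosen
  size_t new_alignment = round_up(page_sz, gen_alignment);
  if (new_alignment != gen_alignment) {
    gen_alignment = new_alignment;       // as in the hunk: adopt it and redo sizing
  }
  printf("gen alignment = %zuK\n", gen_alignment / 1024);
  return 0;
}
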

View File

@ -31,41 +31,17 @@
// TwoGenerationCollectorPolicy. Lets reuse it!
class GenerationSizer : public TwoGenerationCollectorPolicy {
public:
GenerationSizer() {
// Partial init only!
initialize_flags();
initialize_size_info();
}
private:
void initialize_flags() {
// Do basic sizing work
TwoGenerationCollectorPolicy::initialize_flags();
void trace_gen_sizes(const char* const str);
assert(UseSerialGC ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
(ParallelGCThreads > 0),
"ParallelGCThreads should be set before flag initialization");
// The alignment used for boundary between young gen and old gen
static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
// The survivor ratios are calculated "raw", unlike the
// default gc, which adds 2 to the ratio value. We need to
// make sure the values are valid before using them.
if (MinSurvivorRatio < 3) {
MinSurvivorRatio = 3;
}
protected:
if (InitialSurvivorRatio < 3) {
InitialSurvivorRatio = 3;
}
}
size_t min_young_gen_size() { return _min_gen0_size; }
size_t young_gen_size() { return _initial_gen0_size; }
size_t max_young_gen_size() { return _max_gen0_size; }
size_t min_old_gen_size() { return _min_gen1_size; }
size_t old_gen_size() { return _initial_gen1_size; }
size_t max_old_gen_size() { return _max_gen1_size; }
void initialize_alignments();
void initialize_flags();
void initialize_size_info();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP

View File

@ -52,76 +52,20 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
static void trace_gen_sizes(const char* const str,
size_t og_min, size_t og_max,
size_t yg_min, size_t yg_max)
{
if (TracePageSizes) {
tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
SIZE_FORMAT "," SIZE_FORMAT " "
SIZE_FORMAT,
str,
og_min / K, og_max / K,
yg_min / K, yg_max / K,
(og_max + yg_max) / K);
}
}
jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize();
// Cannot be initialized until after the flags are parsed
// GenerationSizer flag_parser;
// Initialize collector policy
_collector_policy = new GenerationSizer();
_collector_policy->initialize_all();
size_t yg_min_size = _collector_policy->min_young_gen_size();
size_t yg_max_size = _collector_policy->max_young_gen_size();
size_t og_min_size = _collector_policy->min_old_gen_size();
size_t og_max_size = _collector_policy->max_old_gen_size();
trace_gen_sizes("ps heap raw",
og_min_size, og_max_size,
yg_min_size, yg_max_size);
const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
yg_max_size + og_max_size,
8);
const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
// Update sizes to reflect the selected page size(s).
//
// NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
// should check UseAdaptiveSizePolicy. Changes from generationSizer could
// move to the common code.
yg_min_size = align_size_up(yg_min_size, yg_align);
yg_max_size = align_size_up(yg_max_size, yg_align);
size_t yg_cur_size =
align_size_up(_collector_policy->young_gen_size(), yg_align);
yg_cur_size = MAX2(yg_cur_size, yg_min_size);
og_min_size = align_size_up(og_min_size, og_align);
// Align old gen size down to preserve specified heap size.
assert(og_align == yg_align, "sanity");
og_max_size = align_size_down(og_max_size, og_align);
og_max_size = MAX2(og_max_size, og_min_size);
size_t og_cur_size =
align_size_down(_collector_policy->old_gen_size(), og_align);
og_cur_size = MAX2(og_cur_size, og_min_size);
trace_gen_sizes("ps heap rnd",
og_min_size, og_max_size,
yg_min_size, yg_max_size);
const size_t heap_size = og_max_size + yg_max_size;
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align);
const size_t heap_size = _collector_policy->max_heap_byte_size();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
os::trace_page_sizes("ps main", og_min_size + yg_min_size,
og_max_size + yg_max_size, og_page_sz,
os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
heap_size, generation_alignment(),
heap_rs.base(),
heap_rs.size());
if (!heap_rs.is_reserved()) {
@ -142,12 +86,6 @@ jint ParallelScavengeHeap::initialize() {
return JNI_ENOMEM;
}
// Initial young gen size is 4 Mb
//
// XXX - what about flag_parser.young_gen_size()?
const size_t init_young_size = align_size_up(4 * M, yg_align);
yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
// Make up the generations
// Calculate the maximum size that a generation can grow. This
// includes growth into the other generation. Note that the
@ -157,14 +95,7 @@ jint ParallelScavengeHeap::initialize() {
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
_gens = new AdjoiningGenerations(heap_rs,
og_cur_size,
og_min_size,
og_max_size,
yg_cur_size,
yg_min_size,
yg_max_size,
yg_align);
_gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
_old_gen = _gens->old_gen();
_young_gen = _gens->young_gen();
@ -176,7 +107,7 @@ jint ParallelScavengeHeap::initialize() {
new PSAdaptiveSizePolicy(eden_capacity,
initial_promo_size,
young_gen()->to_space()->capacity_in_bytes(),
intra_heap_alignment(),
_collector_policy->gen_alignment(),
max_gc_pause_sec,
max_gc_minor_pause_sec,
GCTimeRatio

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
@ -32,14 +33,12 @@
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/collectorPolicy.hpp"
#include "utilities/ostream.hpp"
class AdjoiningGenerations;
class CollectorPolicy;
class GCHeapSummary;
class GCTaskManager;
class GenerationSizer;
class CollectorPolicy;
class PSAdaptiveSizePolicy;
class PSHeapSummary;
@ -50,24 +49,20 @@ class ParallelScavengeHeap : public CollectedHeap {
static PSOldGen* _old_gen;
// Sizing policy for entire heap
static PSAdaptiveSizePolicy* _size_policy;
static PSGCAdaptivePolicyCounters* _gc_policy_counters;
static PSAdaptiveSizePolicy* _size_policy;
static PSGCAdaptivePolicyCounters* _gc_policy_counters;
static ParallelScavengeHeap* _psh;
size_t _young_gen_alignment;
size_t _old_gen_alignment;
GenerationSizer* _collector_policy;
inline size_t set_alignment(size_t& var, size_t val);
// Collection of generations that are adjacent in the
// space reserved for the heap.
AdjoiningGenerations* _gens;
unsigned int _death_march_count;
static GCTaskManager* _gc_task_manager; // The task manager.
// The task manager
static GCTaskManager* _gc_task_manager;
void trace_heap(GCWhen::Type when, GCTracer* tracer);
@ -80,16 +75,7 @@ class ParallelScavengeHeap : public CollectedHeap {
HeapWord* mem_allocate_old_gen(size_t size);
public:
ParallelScavengeHeap() : CollectedHeap() {
_death_march_count = 0;
set_alignment(_young_gen_alignment, intra_heap_alignment());
set_alignment(_old_gen_alignment, intra_heap_alignment());
}
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
return intra_heap_alignment();
}
ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }
// For use by VM operations
enum CollectionType {
@ -103,8 +89,8 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
static PSYoungGen* young_gen() { return _young_gen; }
static PSOldGen* old_gen() { return _old_gen; }
static PSYoungGen* young_gen() { return _young_gen; }
static PSOldGen* old_gen() { return _old_gen; }
virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }
@ -121,13 +107,15 @@ class ParallelScavengeHeap : public CollectedHeap {
void post_initialize();
void update_counters();
// The alignment used for the various generations.
size_t young_gen_alignment() const { return _young_gen_alignment; }
size_t old_gen_alignment() const { return _old_gen_alignment; }
// The alignment used for eden and survivors within the young gen
// and for boundary between young gen and old gen.
static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
// The alignment used for the various areas
size_t space_alignment() { return _collector_policy->space_alignment(); }
size_t generation_alignment() { return _collector_policy->gen_alignment(); }
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
return CollectorPolicy::compute_heap_alignment();
}
size_t capacity() const;
size_t used() const;
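
A worked example of the card-table constraint that compute_heap_alignment enforces (the card and page sizes are assumptions for illustration, not the actual computation):

#include <cstdio>
#include <cstddef>

int main() {
  const size_t bytes_per_card = 512;   // heap bytes covered by one card byte (assumed)
  const size_t os_page_size   = 4096;  // OS page size (assumed)
  // A fully used card-table page maps this much heap, so aligning the heap
  // to it keeps the side arrays committed in whole pages:
  size_t heap_alignment = bytes_per_card * os_page_size;  // 2 MiB
  printf("heap alignment = %zu bytes (%zu MiB)\n",
         heap_alignment, heap_alignment >> 20);
  return 0;
}
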
@ -157,16 +145,15 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual bool is_in_partial_collection(const void *p);
#endif
bool is_in_young(oop p); // reserved part
bool is_in_old(oop p); // reserved part
bool is_in_young(oop p); // reserved part
bool is_in_old(oop p); // reserved part
// Memory allocation. "gc_time_limit_was_exceeded" will
// be set to true if the adaptive size policy determine that
// an excessive amount of time is being spent doing collections
// and caused a NULL to be returned. If a NULL is not returned,
// "gc_time_limit_was_exceeded" has an undefined meaning.
HeapWord* mem_allocate(size_t size,
bool* gc_overhead_limit_was_exceeded);
HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
// Allocation attempt(s) during a safepoint. It should never be called
// to allocate a new TLAB as this allocation might be satisfied out
@ -257,17 +244,10 @@ class ParallelScavengeHeap : public CollectedHeap {
// Call these in sequential code around the processing of strong roots.
class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
public:
public:
ParStrongRootsScope();
~ParStrongRootsScope();
};
};
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
assert(is_power_of_2((intptr_t)val), "must be a power of 2");
var = round_to(val, intra_heap_alignment());
return var;
}
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

View File

@ -37,7 +37,7 @@
PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size,
size_t intra_generation_alignment,
size_t space_alignment,
double gc_pause_goal_sec,
double gc_minor_pause_goal_sec,
uint gc_cost_ratio) :
@ -46,9 +46,8 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
init_survivor_size,
gc_pause_goal_sec,
gc_cost_ratio),
_collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/
100.0),
_intra_generation_alignment(intra_generation_alignment),
_collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0),
_space_alignment(space_alignment),
_live_at_last_full_gc(init_promo_size),
_gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
_latest_major_mutator_interval_seconds(0),
@ -353,11 +352,10 @@ void PSAdaptiveSizePolicy::compute_eden_space_size(
}
// Align everything and make a final limit check
const size_t alignment = _intra_generation_alignment;
desired_eden_size = align_size_up(desired_eden_size, alignment);
desired_eden_size = MAX2(desired_eden_size, alignment);
desired_eden_size = align_size_up(desired_eden_size, _space_alignment);
desired_eden_size = MAX2(desired_eden_size, _space_alignment);
eden_limit = align_size_down(eden_limit, alignment);
eden_limit = align_size_down(eden_limit, _space_alignment);
// And one last limit check, now that we've aligned things.
if (desired_eden_size > eden_limit) {
@ -561,11 +559,10 @@ void PSAdaptiveSizePolicy::compute_old_gen_free_space(
}
// Align everything and make a final limit check
const size_t alignment = _intra_generation_alignment;
desired_promo_size = align_size_up(desired_promo_size, alignment);
desired_promo_size = MAX2(desired_promo_size, alignment);
desired_promo_size = align_size_up(desired_promo_size, _space_alignment);
desired_promo_size = MAX2(desired_promo_size, _space_alignment);
promo_limit = align_size_down(promo_limit, alignment);
promo_limit = align_size_down(promo_limit, _space_alignment);
// And one last limit check, now that we've aligned things.
desired_promo_size = MIN2(desired_promo_size, promo_limit);
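
Both hunks above follow the same pattern: align the desired size up (never silently shrink the request), align the limit down (never exceed the budget), then compare once more because alignment can push the two past each other. A standalone sketch with assumed values:

#include <cstdio>
#include <cstddef>

static size_t align_up(size_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
static size_t align_down(size_t v, size_t a) { return v & ~(a - 1); }

int main() {
  const size_t space_alignment = 1 << 20;              // assumed 1M alignment
  size_t desired = 4 * 1024 * 1024 + 300 * 1024;       // ~4.3M requested
  size_t limit   = 4 * 1024 * 1024 + 900 * 1024;       // ~4.9M available

  desired = align_up(desired, space_alignment);        // -> 5M
  limit   = align_down(limit, space_alignment);        // -> 4M

  if (desired > limit) {                               // the final limit check
    desired = limit;
  }
  printf("desired = %zuM\n", desired >> 20);
  return 0;
}
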
@ -650,7 +647,7 @@ void PSAdaptiveSizePolicy::adjust_promo_for_minor_pause_time(bool is_full_gc,
}
// If the desired eden size is as small as it will get,
// try to adjust the old gen size.
if (*desired_eden_size_ptr <= _intra_generation_alignment) {
if (*desired_eden_size_ptr <= _space_alignment) {
// Vary the old gen size to reduce the young gen pause. This
// may not be a good idea. This is just a test.
if (minor_pause_old_estimator()->decrement_will_decrease()) {
@ -755,7 +752,7 @@ void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc,
// If the promo size is at the minimum (i.e., the old gen
// size will not actually decrease), consider changing the
// young gen size.
if (*desired_promo_size_ptr < _intra_generation_alignment) {
if (*desired_promo_size_ptr < _space_alignment) {
// If increasing the young generation will decrease the old gen
// pause, do it.
// During startup there is noise in the statistics for deciding
@ -1066,24 +1063,24 @@ size_t PSAdaptiveSizePolicy::eden_increment(size_t cur_eden) {
size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement);
return align_size_up(result, _intra_generation_alignment);
return align_size_up(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) {
size_t result = eden_increment(cur_eden);
return align_size_down(result, _intra_generation_alignment);
return align_size_down(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up(
size_t cur_eden) {
size_t result = eden_increment(cur_eden,
YoungGenerationSizeIncrement + _young_gen_size_increment_supplement);
return align_size_up(result, _intra_generation_alignment);
return align_size_up(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) {
size_t eden_heap_delta = eden_decrement(cur_eden);
return align_size_down(eden_heap_delta, _intra_generation_alignment);
return align_size_down(eden_heap_delta, _space_alignment);
}
size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
@ -1105,24 +1102,24 @@ size_t PSAdaptiveSizePolicy::promo_increment(size_t cur_promo) {
size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
return align_size_up(result, _intra_generation_alignment);
return align_size_up(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) {
size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
return align_size_down(result, _intra_generation_alignment);
return align_size_down(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up(
size_t cur_promo) {
size_t result = promo_increment(cur_promo,
TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement);
return align_size_up(result, _intra_generation_alignment);
return align_size_up(result, _space_alignment);
}
size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) {
size_t promo_heap_delta = promo_decrement(cur_promo);
return align_size_down(promo_heap_delta, _intra_generation_alignment);
return align_size_down(promo_heap_delta, _space_alignment);
}
size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
@ -1135,9 +1132,9 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
uint tenuring_threshold,
size_t survivor_limit) {
assert(survivor_limit >= _intra_generation_alignment,
assert(survivor_limit >= _space_alignment,
"survivor_limit too small");
assert((size_t)align_size_down(survivor_limit, _intra_generation_alignment)
assert((size_t)align_size_down(survivor_limit, _space_alignment)
== survivor_limit, "survivor_limit not aligned");
// This method is called even if the tenuring threshold and survivor
@ -1201,8 +1198,8 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
// We're trying to pad the survivor size as little as possible without
// overflowing the survivor spaces.
size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
_intra_generation_alignment);
target_size = MAX2(target_size, _intra_generation_alignment);
_space_alignment);
target_size = MAX2(target_size, _space_alignment);
if (target_size > survivor_limit) {
// Target size is bigger than we can handle. Let's also reduce

View File

@ -91,7 +91,7 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// for making ergonomic decisions.
double _latest_major_mutator_interval_seconds;
const size_t _intra_generation_alignment; // alignment for eden, survivors
const size_t _space_alignment; // alignment for eden, survivors
const double _gc_minor_pause_goal_sec; // goal for maximum minor gc pause
@ -229,7 +229,7 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
PSAdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size,
size_t intra_generation_alignment,
size_t space_alignment,
double gc_pause_goal_sec,
double gc_minor_pause_goal_sec,
uint gc_time_ratio);
@ -378,7 +378,7 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// remain almost full anyway (top() will be near end(), but there will be a
// large filler object at the bottom).
const size_t sz = gen_size / MinSurvivorRatio;
const size_t alignment = _intra_generation_alignment;
const size_t alignment = _space_alignment;
return sz > alignment ? align_size_down(sz, alignment) : alignment;
}

View File

@ -103,7 +103,7 @@ void PSYoungGen::initialize_work() {
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t alignment = heap->intra_heap_alignment();
size_t alignment = heap->space_alignment();
size_t size = virtual_space()->reserved_size();
size_t max_survivor_size;
@ -156,8 +156,9 @@ void PSYoungGen::compute_initial_space_boundaries() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Compute sizes
size_t alignment = heap->intra_heap_alignment();
size_t alignment = heap->space_alignment();
size_t size = virtual_space()->committed_size();
assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
size_t survivor_size = size / InitialSurvivorRatio;
survivor_size = align_size_down(survivor_size, alignment);
@ -207,7 +208,7 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
#ifndef PRODUCT
void PSYoungGen::space_invariants() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const size_t alignment = heap->space_alignment();
// Currently, our eden size cannot shrink to zero
guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
@ -491,7 +492,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
char* to_end = (char*)to_space()->end();
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@ -840,8 +841,8 @@ size_t PSYoungGen::available_to_min_gen() {
size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0;
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t space_alignment = heap->intra_heap_alignment();
const size_t gen_alignment = heap->young_gen_alignment();
const size_t space_alignment = heap->space_alignment();
const size_t gen_alignment = heap->generation_alignment();
MutableSpace* space_shrinking = NULL;
if (from_space()->end() > to_space()->end()) {

View File

@ -469,6 +469,10 @@ void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
fill_with_object_impl(start, words, zap);
}
void CollectedHeap::post_initialize() {
collector_policy()->post_heap_initialize();
}
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
guarantee(false, "thread-local allocation buffers not supported");
return NULL;

View File

@ -152,11 +152,13 @@ CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
// Could be an Object method inherited into an interface, but still a vtable call.
kind = CallInfo::vtable_call;
} else if (!resolved_klass->is_interface()) {
// A miranda method. Compute the vtable index.
// A default or miranda method. Compute the vtable index.
ResourceMark rm;
klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
index = vt->index_of_miranda(resolved_method->name(),
resolved_method->signature());
index = LinkResolver::vtable_index_of_interface_method(resolved_klass,
resolved_method);
assert(index >= 0 , "we should have valid vtable index at this point");
kind = CallInfo::vtable_call;
} else if (resolved_method->has_vtable_index()) {
// Can occur if an interface redeclares a method of Object.
@ -279,7 +281,7 @@ void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, Klass
}
int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
methodHandle resolved_method, TRAPS) {
methodHandle resolved_method) {
int vtable_index = Method::invalid_vtable_index;
Symbol* name = resolved_method->name();
@ -295,7 +297,7 @@ int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
}
if (vtable_index == Method::invalid_vtable_index) {
// get vtable_index for miranda methods
ResourceMark rm(THREAD);
ResourceMark rm;
klassVtable *vt = InstanceKlass::cast(klass())->vtable();
vtable_index = vt->index_of_miranda(name, signature);
}
@ -691,7 +693,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
@ -937,7 +939,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
@ -1017,7 +1019,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
);
sel_method->access_flags().print_on(tty);
if (sel_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (sel_method->is_overpass()) {
tty->print("overpass");
@ -1081,7 +1083,7 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
@ -1118,7 +1120,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
// do lookup based on receiver klass using the vtable index
if (resolved_method->method_holder()->is_interface()) { // miranda method
vtable_index = vtable_index_of_interface_method(resolved_klass,
resolved_method, CHECK);
resolved_method);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@ -1175,7 +1177,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
);
selected_method->access_flags().print_on(tty);
if (selected_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (selected_method->is_overpass()) {
tty->print("overpass");
@ -1268,14 +1270,6 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
sel_method->name(),
sel_method->signature()));
}
// setup result
if (!resolved_method->has_itable_index()) {
int vtable_index = resolved_method->vtable_index();
assert(vtable_index == sel_method->vtable_index(), "sanity check");
result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
return;
}
int itable_index = resolved_method()->itable_index();
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
@ -1289,14 +1283,22 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
);
sel_method->access_flags().print_on(tty);
if (sel_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (sel_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
// setup result
if (!resolved_method->has_itable_index()) {
int vtable_index = resolved_method->vtable_index();
assert(vtable_index == sel_method->vtable_index(), "sanity check");
result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
} else {
int itable_index = resolved_method()->itable_index();
result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
}
}
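
A sketch of the dispatch-kind decision above, with stand-in types rather than the real Method/CallInfo classes: a method resolved through an interface that carries no itable index (for example an Object method redeclared in an interface) is dispatched through the vtable instead.

#include <cstdio>

struct Method {                 // stand-in, not HotSpot's Method
  int vtable_index;             // >= 0 when vtable-dispatched
  int itable_index;             // >= 0 when itable-dispatched
  bool has_itable_index() const { return itable_index >= 0; }
};

static const char* dispatch_kind(const Method& m) {
  return m.has_itable_index() ? "itable (set_interface)" : "vtable (set_virtual)";
}

int main() {
  Method object_method = { 5, -1 };  // e.g. toString() seen through an interface
  Method iface_method  = { -1, 2 };  // an ordinary interface method
  printf("%s\n%s\n", dispatch_kind(object_method), dispatch_kind(iface_method));
  return 0;
}
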

View File

@ -130,7 +130,6 @@ class LinkResolver: AllStatic {
static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS);
static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
@ -186,6 +185,7 @@ class LinkResolver: AllStatic {
static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
// same as above for compile-time resolution; returns vtable_index if current_klass is linked
static int resolve_virtual_vtable_index (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);

View File

@ -72,19 +72,21 @@ void Rewriter::compute_index_maps() {
// Unrewrite the bytecodes if an error occurs.
void Rewriter::restore_bytecodes() {
int len = _methods->length();
bool invokespecial_error = false;
for (int i = len-1; i >= 0; i--) {
Method* method = _methods->at(i);
scan_method(method, true);
scan_method(method, true, &invokespecial_error);
assert(!invokespecial_error, "reversing should not get an invokespecial error");
}
}
// Creates a constant pool cache given a CPC map
void Rewriter::make_constant_pool_cache(TRAPS) {
const int length = _cp_cache_map.length();
ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
ConstantPoolCache* cache =
ConstantPoolCache::allocate(loader_data, length, _cp_cache_map,
ConstantPoolCache::allocate(loader_data, _cp_cache_map,
_invokedynamic_cp_cache_map,
_invokedynamic_references_map, CHECK);
// initialize object cache in constant pool
@ -154,6 +156,30 @@ void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
}
}
// If the constant pool entry for invokespecial is InterfaceMethodref,
// we need to add a separate cpCache entry for its resolution, because it is
// different from the resolution for invokeinterface with InterfaceMethodref.
// These cannot share cpCache entries. It is unclear whether all invokespecial
// references to an InterfaceMethodref would resolve to the same thing, so a new cpCache entry
// is created for each one. This was added with lambda.
void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error) {
address p = bcp + offset;
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
if (_pool->tag_at(cp_index).is_interface_method()) {
int cache_index = add_invokespecial_cp_cache_entry(cp_index);
if (cache_index != (int)(jushort) cache_index) {
*invokespecial_error = true;
}
Bytes::put_native_u2(p, cache_index);
} else {
rewrite_member_reference(bcp, offset, reverse);
}
} else {
rewrite_member_reference(bcp, offset, reverse);
}
}
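
The cache_index != (int)(jushort)cache_index test above is a round-trip check: invokespecial carries only a two-byte operand, so any cache index that does not survive truncation to 16 bits cannot be encoded. A standalone illustration (the typedef is a stand-in for HotSpot's jushort):

#include <cstdint>
#include <cstdio>

typedef uint16_t jushort;  // stand-in for HotSpot's 16-bit unsigned type

static bool fits_in_u2(int cache_index) {
  return cache_index == (int)(jushort)cache_index;  // survives the round trip?
}

int main() {
  printf("65535 -> %s\n", fits_in_u2(65535) ? "ok" : "overflow");
  printf("65536 -> %s\n", fits_in_u2(65536) ? "ok" : "overflow");
  return 0;
}
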
// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
@ -203,7 +229,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
add_invokedynamic_resolved_references_entries(cp_index, cache_index);
int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
// not just one per distinct CP entry. In other words, the
@ -212,13 +238,20 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
// all these entries. That is the main reason invokedynamic
// must have a five-byte instruction format. (Of course, other JVM
// implementations can use the bytes for other purposes.)
Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
// Note: We use native_u4 format exclusively for 4-byte indexes.
Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
// add the bcp in case we need to patch this bytecode if we also find an
// invokespecial/InterfaceMethodref in the bytecode stream
_patch_invokedynamic_bcps->push(p);
_patch_invokedynamic_refs->push(resolved_index);
} else {
// callsite index
int cache_index = ConstantPool::decode_invokedynamic_index(
Bytes::get_native_u4(p));
int cp_index = cp_cache_entry_pool_index(cache_index);
// We will reverse the bytecode rewriting _after_ adjusting them.
// Adjust the cache index by offset to the invokedynamic entries in the
// cpCache plus the delta if the invokedynamic bytecodes were adjusted.
cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
// zero out 4 bytes
Bytes::put_Java_u4(p, 0);
@ -226,6 +259,34 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
}
}
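
A sketch of a self-inverse index encoding in the spirit of encode/decode_invokedynamic_index (the exact HotSpot scheme is not reproduced here; bitwise complement, which makes encoded indices negative and therefore distinguishable, is one such scheme):

#include <cstdio>

static int  encode_indy_index(int i) { return ~i; }  // negative for i >= 0
static int  decode_indy_index(int i) { return ~i; }  // complement is its own inverse
static bool is_indy_index(int i)     { return i < 0; }

int main() {
  int cache_index = 7;
  int encoded = encode_indy_index(cache_index);
  printf("encoded=%d is_indy=%d decoded=%d\n",
         encoded, (int)is_indy_index(encoded), decode_indy_index(encoded));
  return 0;
}
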
void Rewriter::patch_invokedynamic_bytecodes() {
// If the end of the cp_cache is the same as after initializing with the
// cpool, nothing needs to be done. Invokedynamic bytecodes are at the
// correct offsets, i.e. no invokespecial entries were added.
int delta = cp_cache_delta();
if (delta > 0) {
int length = _patch_invokedynamic_bcps->length();
assert(length == _patch_invokedynamic_refs->length(),
"lengths should match");
for (int i = 0; i < length; i++) {
address p = _patch_invokedynamic_bcps->at(i);
int cache_index = ConstantPool::decode_invokedynamic_index(
Bytes::get_native_u4(p));
Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));
// invokedynamic resolved references map also points to cp cache and must
// add delta to each.
int resolved_index = _patch_invokedynamic_refs->at(i);
for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
"should be the same index");
_invokedynamic_references_map.at_put(resolved_index+entry,
cache_index + delta);
}
}
}
}
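
A standalone sketch of the delta patch above (the recorded indices and lengths are hypothetical): every invokedynamic cache index recorded during scanning was based on the cpCache length at that time, so if entries were appended afterwards, all recorded indices shift by the same delta.

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> recorded = {10, 11, 12};  // hypothetical indy cache indices
  const int first_iteration_limit = 10;      // cpCache length when they were recorded
  const int final_length          = 12;      // length after invokespecial entries were added
  const int delta = final_length - first_iteration_limit;

  if (delta > 0) {                           // nothing to do when no entries were added
    for (int& index : recorded) {
      index += delta;                        // the same shift for every recorded index
    }
  }
  for (int index : recorded) printf("%d ", index);
  printf("\n");
  return 0;
}
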
// Rewrite some ldc bytecodes to _fast_aldc
void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
@ -269,7 +330,7 @@ void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
// Rewrites a method given the index_map information
void Rewriter::scan_method(Method* method, bool reverse) {
void Rewriter::scan_method(Method* method, bool reverse, bool* invokespecial_error) {
int nof_jsrs = 0;
bool has_monitor_bytecodes = false;
@ -329,12 +390,17 @@ void Rewriter::scan_method(Method* method, bool reverse) {
#endif
break;
}
case Bytecodes::_invokespecial : {
rewrite_invokespecial(bcp, prefix_length+1, reverse, invokespecial_error);
break;
}
case Bytecodes::_getstatic : // fall through
case Bytecodes::_putstatic : // fall through
case Bytecodes::_getfield : // fall through
case Bytecodes::_putfield : // fall through
case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic :
case Bytecodes::_invokeinterface:
case Bytecodes::_invokehandle : // if reverse=true
@ -423,12 +489,26 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
// rewrite methods, in two passes
int len = _methods->length();
bool invokespecial_error = false;
for (int i = len-1; i >= 0; i--) {
Method* method = _methods->at(i);
scan_method(method);
scan_method(method, false, &invokespecial_error);
if (invokespecial_error) {
// If you get an error here, there is no way to reverse the bytecodes.
// This exception is stored for this class and no further attempt is
// made at verifying or rewriting.
THROW_MSG(vmSymbols::java_lang_InternalError(),
"This classfile overflows invokespecial for interfaces "
"and cannot be loaded");
return;
}
}
// May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
// entries had to be added.
patch_invokedynamic_bytecodes();
// allocate constant pool cache, now that we've seen all the bytecodes
make_constant_pool_cache(THREAD);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,55 +46,102 @@ class Rewriter: public StackObj {
intArray _method_handle_invokers;
int _resolved_reference_limit;
// For mapping invokedynamic bytecodes, which are discovered during method
// scanning. The invokedynamic entries are added at the end of the cpCache.
// If there are any invokespecial/InterfaceMethodref special case bytecodes,
// these entries are added before invokedynamic entries so that the
// invokespecial bytecode 16 bit index doesn't overflow.
intStack _invokedynamic_cp_cache_map;
// For patching.
GrowableArray<address>* _patch_invokedynamic_bcps;
GrowableArray<int>* _patch_invokedynamic_refs;
void init_maps(int length) {
_cp_map.initialize(length, -1);
// Choose an initial value large enough that we don't get frequent
// calls to grow().
_cp_cache_map.initialize(length / 2);
_cp_cache_map.initialize(length/2);
// Also cache resolved objects, in another different cache.
_reference_map.initialize(length, -1);
_resolved_references_map.initialize(length / 2);
_invokedynamic_references_map.initialize(length / 2);
_resolved_references_map.initialize(length/2);
_invokedynamic_references_map.initialize(length/2);
_resolved_reference_limit = -1;
DEBUG_ONLY(_cp_cache_index_limit = -1);
_first_iteration_cp_cache_limit = -1;
// invokedynamic specific fields
_invokedynamic_cp_cache_map.initialize(length/4);
_patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
_patch_invokedynamic_refs = new GrowableArray<int>(length/4);
}
int _cp_cache_index_limit;
int _first_iteration_cp_cache_limit;
void record_map_limits() {
#ifdef ASSERT
// Record initial size of the two arrays generated for the CP cache:
_cp_cache_index_limit = _cp_cache_map.length();
#endif //ASSERT
// Record initial size of the two arrays generated for the CP cache
// relative to walking the constant pool.
_first_iteration_cp_cache_limit = _cp_cache_map.length();
_resolved_reference_limit = _resolved_references_map.length();
}
int cp_cache_delta() {
// How many cp cache entries were added since recording map limits after
// cp cache initialization?
assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
}
int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
int cache_index = cp_cache_map->append(cp_index);
cp_map->at_put(cp_index, cache_index);
return cache_index;
}
int add_cp_cache_entry(int cp_index) {
assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
int cache_index = _cp_cache_map.append(cp_index);
_cp_map.at_put(cp_index, cache_index);
assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
return cache_index;
}
// add a new CP cache entry beyond the normal cache (for invokedynamic only)
int add_invokedynamic_cp_cache_entry(int cp_index) {
assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
assert(_cp_map[cp_index] == -1, "do not map from cp_index");
assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
// add to the invokedynamic index map.
int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
// do not update _cp_map, since the mapping is one-to-many
assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
// this index starts at one but in the bytecode it's appended to the end.
return cache_index + _first_iteration_cp_cache_limit;
}
int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
int cp_index = _invokedynamic_cp_cache_map[cache_index];
return cp_index;
}
// add a new CP cache entry beyond the normal cache for the special case of
// invokespecial with InterfaceMethodref as cpool operand.
int add_invokespecial_cp_cache_entry(int cp_index) {
assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
// Don't add InterfaceMethodref if it already exists at the end.
for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
if (cp_cache_entry_pool_index(i) == cp_index) {
return i;
}
}
int cache_index = _cp_cache_map.append(cp_index);
assert(cache_index >= _cp_cache_index_limit, "");
assert(cache_index >= _first_iteration_cp_cache_limit, "");
// do not update _cp_map, since the mapping is one-to-many
assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
return cache_index;
}
// fix duplicated code later
int cp_entry_to_resolved_references(int cp_index) const {
assert(has_entry_in_resolved_references(cp_index), "oob");
return _reference_map[cp_index];
@ -105,10 +152,7 @@ class Rewriter: public StackObj {
// add a new entry to the resolved_references map
int add_resolved_references_entry(int cp_index) {
assert(_reference_map[cp_index] == -1, "not twice on same cp_index");
assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration");
int ref_index = _resolved_references_map.append(cp_index);
_reference_map.at_put(cp_index, ref_index);
int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
return ref_index;
}
@ -137,7 +181,7 @@ class Rewriter: public StackObj {
// Access the contents of _cp_cache_map to determine CP cache layout.
int cp_cache_entry_pool_index(int cache_index) {
int cp_index = _cp_cache_map[cache_index];
return cp_index;
return cp_index;
}
// All the work goes in here:
@ -145,12 +189,16 @@ class Rewriter: public StackObj {
void compute_index_maps();
void make_constant_pool_cache(TRAPS);
void scan_method(Method* m, bool reverse = false);
void scan_method(Method* m, bool reverse, bool* invokespecial_error);
void rewrite_Object_init(methodHandle m, TRAPS);
void rewrite_member_reference(address bcp, int offset, bool reverse = false);
void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false);
void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
void rewrite_member_reference(address bcp, int offset, bool reverse);
void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
void rewrite_invokedynamic(address bcp, int offset, bool reverse);
void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
void rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error);
void patch_invokedynamic_bytecodes();
// Revert bytecodes in case of an exception.
void restore_bytecodes();

View File

@ -47,54 +47,107 @@
// CollectorPolicy methods.
void CollectorPolicy::initialize_flags() {
assert(_max_alignment >= _min_alignment,
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
_max_alignment, _min_alignment));
assert(_max_alignment % _min_alignment == 0,
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
_max_alignment, _min_alignment));
CollectorPolicy::CollectorPolicy() :
_space_alignment(0),
_heap_alignment(0),
_initial_heap_byte_size(InitialHeapSize),
_max_heap_byte_size(MaxHeapSize),
_min_heap_byte_size(Arguments::min_heap_size()),
_max_heap_size_cmdline(false),
_size_policy(NULL),
_should_clear_all_soft_refs(false),
_all_soft_refs_clear(false)
{}
if (MaxHeapSize < InitialHeapSize) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
#ifdef ASSERT
void CollectorPolicy::assert_flags() {
assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment");
assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment");
}
void CollectorPolicy::initialize_size_info() {
// User inputs from -mx and ms must be aligned
_min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
_initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
_max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
void CollectorPolicy::assert_size_info() {
assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage");
assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage");
assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes");
assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes");
assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes");
assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment");
assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment");
assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment");
}
#endif // ASSERT
void CollectorPolicy::initialize_flags() {
assert(_space_alignment != 0, "Space alignment not set up properly");
assert(_heap_alignment != 0, "Heap alignment not set up properly");
assert(_heap_alignment >= _space_alignment,
err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT,
_heap_alignment, _space_alignment));
assert(_heap_alignment % _space_alignment == 0,
err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
_heap_alignment, _space_alignment));
if (FLAG_IS_CMDLINE(MaxHeapSize)) {
if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
}
if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
}
_max_heap_size_cmdline = true;
}
// Check heap parameter properties
if (_initial_heap_byte_size < M) {
if (InitialHeapSize < M) {
vm_exit_during_initialization("Too small initial heap");
}
// Check heap parameter properties
if (_min_heap_byte_size < M) {
vm_exit_during_initialization("Too small minimum heap");
}
if (_initial_heap_byte_size <= NewSize) {
// make sure there is at least some room in old space
vm_exit_during_initialization("Too small initial heap for new size specified");
// User inputs from -Xmx and -Xms must be aligned
_min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);
// Write back to flags if the values changed
if (aligned_initial_heap_size != InitialHeapSize) {
FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size);
}
if (_max_heap_byte_size < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
}
if (_initial_heap_byte_size < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
}
if (_max_heap_byte_size < _initial_heap_byte_size) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
if (aligned_max_heap_size != MaxHeapSize) {
FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size);
}
if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
InitialHeapSize < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
}
if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize);
} else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize);
if (InitialHeapSize < _min_heap_byte_size) {
_min_heap_byte_size = InitialHeapSize;
}
}
_initial_heap_byte_size = InitialHeapSize;
_max_heap_byte_size = MaxHeapSize;
FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
DEBUG_ONLY(CollectorPolicy::assert_flags();)
}
void CollectorPolicy::initialize_size_info() {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
_min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
}
DEBUG_ONLY(CollectorPolicy::assert_size_info();)
}
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
@ -105,7 +158,6 @@ bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
int max_covered_regions) {
assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
return new CardTableRS(whole_heap, max_covered_regions);
}
@ -119,7 +171,7 @@ void CollectorPolicy::cleared_all_soft_refs() {
_all_soft_refs_clear = true;
}
size_t CollectorPolicy::compute_max_alignment() {
size_t CollectorPolicy::compute_heap_alignment() {
// The card marking array and the offset arrays for old generations are
// committed in os pages as well. Make sure they are entirely full (to
// avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
@ -146,18 +198,21 @@ size_t CollectorPolicy::compute_max_alignment() {
// GenCollectorPolicy methods.
GenCollectorPolicy::GenCollectorPolicy() :
_min_gen0_size(0),
_initial_gen0_size(0),
_max_gen0_size(0),
_gen_alignment(0),
_generations(NULL)
{}
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
size_t x = base_size / (NewRatio+1);
size_t new_gen_size = x > _min_alignment ?
align_size_down(x, _min_alignment) :
_min_alignment;
return new_gen_size;
return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
}
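// A worked example (sketch, assuming a 2m generation alignment): a 1000m
// base size with NewRatio=2 leaves base_size / 3 for the young gen, aligned
// down to the generation alignment but never below it.
static void sketch_new_ratio_split() {
  const size_t sketch_gen_alignment = 2 * M;
  size_t young = align_size_down_bounded(1000 * M / (2 + 1), sketch_gen_alignment);
  assert(young == 332 * M, "1000m / 3 is ~333m, aligned down to 332m");
}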
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
size_t maximum_size) {
size_t alignment = _min_alignment;
size_t max_minus = maximum_size - alignment;
size_t max_minus = maximum_size - _gen_alignment;
return desired_size < max_minus ? desired_size : max_minus;
}
@@ -165,7 +220,7 @@ size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size) {
const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
_size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
@@ -173,100 +228,181 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
GCTimeRatio);
}
void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity.
_min_alignment = (uintx) Generation::GenGrain;
_max_alignment = compute_max_alignment();
size_t GenCollectorPolicy::young_gen_size_lower_bound() {
// The young generation must be aligned and have room for eden + two survivors
return align_size_up(3 * _space_alignment, _gen_alignment);
}
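// A worked example (sketch, hypothetical alignments): with a 64K space
// alignment and a 256K generation alignment, eden plus two survivors needs
// 3 * 64K = 192K, which rounds up to a single 256K-aligned young gen.
static void sketch_young_gen_lower_bound() {
  assert(align_size_up(3 * 64 * K, 256 * K) == 256 * K, "192K rounds up to 256K");
}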
#ifdef ASSERT
void GenCollectorPolicy::assert_flags() {
CollectorPolicy::assert_flags();
assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
assert(NewSize % _gen_alignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment");
}
void TwoGenerationCollectorPolicy::assert_flags() {
GenCollectorPolicy::assert_flags();
assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
assert(OldSize % _gen_alignment == 0, "OldSize alignment");
}
void GenCollectorPolicy::assert_size_info() {
CollectorPolicy::assert_size_info();
// GenCollectorPolicy::initialize_size_info may update the MaxNewSize
assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage");
assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage");
assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes");
assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
}
void TwoGenerationCollectorPolicy::assert_size_info() {
GenCollectorPolicy::assert_size_info();
assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage");
assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes");
assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes");
assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
}
#endif // ASSERT
void GenCollectorPolicy::initialize_flags() {
CollectorPolicy::initialize_flags();
// All generational heaps have a youngest gen; handle those flags here.
assert(_gen_alignment != 0, "Generation alignment not set up properly");
assert(_heap_alignment >= _gen_alignment,
err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
_heap_alignment, _gen_alignment));
assert(_gen_alignment % _space_alignment == 0,
err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
_gen_alignment, _space_alignment));
assert(_heap_alignment % _gen_alignment == 0,
err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
_heap_alignment, _gen_alignment));
// All generational heaps have a youngest gen; handle those flags here
// Make sure the heap is large enough for two generations
uintx smallest_new_size = young_gen_size_lower_bound();
uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
_heap_alignment);
if (MaxHeapSize < smallest_heap_size) {
FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
_max_heap_byte_size = MaxHeapSize;
}
// If needed, synchronize _min_heap_byte_size and _initial_heap_byte_size
if (_min_heap_byte_size < smallest_heap_size) {
_min_heap_byte_size = smallest_heap_size;
if (InitialHeapSize < _min_heap_byte_size) {
FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
_initial_heap_byte_size = smallest_heap_size;
}
}
// Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller value.
smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
if (smallest_new_size != NewSize) {
FLAG_SET_ERGO(uintx, NewSize, smallest_new_size);
}
_initial_gen0_size = NewSize;
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);
if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation
uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
"heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
}
FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
if (NewSize > MaxNewSize) {
FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
_initial_gen0_size = NewSize;
}
} else if (MaxNewSize < min_new_size) {
FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
}
_max_gen0_size = MaxNewSize;
}
// Adjust max size parameters
if (NewSize > MaxNewSize) {
MaxNewSize = NewSize;
// At this point this should only happen if the user specifies a large NewSize and/or
// a small (but not too small) MaxNewSize.
if (FLAG_IS_CMDLINE(MaxNewSize)) {
warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
"A new max generation size of " SIZE_FORMAT "k will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
_max_gen0_size = MaxNewSize;
}
NewSize = align_size_down(NewSize, _min_alignment);
MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
// Check validity of heap flags
assert(NewSize % _min_alignment == 0, "eden space alignment");
assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");
if (NewSize < 3 * _min_alignment) {
// make sure there room for eden and two survivor spaces
vm_exit_during_initialization("Too small new size specified");
}
if (SurvivorRatio < 1 || NewRatio < 1) {
vm_exit_during_initialization("Invalid young gen ratio specified");
}
DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
void TwoGenerationCollectorPolicy::initialize_flags() {
GenCollectorPolicy::initialize_flags();
OldSize = align_size_down(OldSize, _min_alignment);
if (!is_size_aligned(OldSize, _gen_alignment)) {
FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
}
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
// NewRatio will be used later to set the young generation size so we use
// it to calculate how big the heap should be based on the requested OldSize
// and NewRatio.
assert(NewRatio > 0, "NewRatio should have been set up earlier");
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
MaxHeapSize = calculated_heapsize;
InitialHeapSize = calculated_heapsize;
calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize);
_max_heap_byte_size = MaxHeapSize;
FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize);
_initial_heap_byte_size = InitialHeapSize;
}
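{
  // Worked example (sketch, hypothetical flag values): -XX:OldSize=768m with
  // NewRatio=2 implies a young gen of OldSize / NewRatio = 384m, so the
  // calculated heap is (768m / 2) * (2 + 1) = 1152m before alignment.
  assert((768 * M / 2) * (2 + 1) == 1152 * M, "worked example");
}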
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
// adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) {
if (FLAG_IS_CMDLINE(MaxHeapSize)) {
if (_max_heap_size_cmdline) {
// somebody set a maximum heap size with the intention that we should not
// exceed it. Adjust New/OldSize as necessary.
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
// align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to
// _max_alignment, and we just made sure that NewSize is aligned to
// _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize;
} else {
MaxHeapSize = NewSize + OldSize;
}
}
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
_initial_gen0_size = NewSize;
// adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) {
if (FLAG_IS_CMDLINE(MaxHeapSize)) {
// somebody set a maximum heap size with the intention that we should not
// exceed it. Adjust New/OldSize as necessary.
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
// align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to
// _max_alignment, and we just made sure that NewSize is aligned to
// _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize;
// _heap_alignment, and we just made sure that NewSize is aligned to
// _gen_alignment. In initialize_flags() we verified that _heap_alignment
// is a multiple of _gen_alignment.
FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize);
} else {
MaxHeapSize = NewSize + OldSize;
FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
_max_heap_byte_size = MaxHeapSize;
}
}
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
always_do_update_barrier = UseConcMarkSweepGC;
// Check validity of heap flags
assert(OldSize % _min_alignment == 0, "old space alignment");
assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
}
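// A worked example (sketch, assuming a 2m generation alignment) of the
// shrink path above: -XX:NewSize=512m -XX:OldSize=1024m -Xmx1024m gives
// calculated_size = 1536m and shrink_factor = 1024/1536, so NewSize drops
// to ~341m, aligns down to 340m, and OldSize is refitted to 684m.
static void sketch_shrink_to_fit() {
  const size_t sketch_gen_alignment = 2 * M;
  double shrink_factor = (double) (1024 * M) / (512 * M + 1024 * M);
  uintx new_size = align_size_down((uintx)(512 * M * shrink_factor), sketch_gen_alignment);
  assert(new_size == 340 * M, "worked example");
  assert(1024 * M - new_size == 684 * M, "OldSize refit");
}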
// Values set on the command line win over any ergonomically
@@ -281,7 +417,7 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
void GenCollectorPolicy::initialize_size_info() {
CollectorPolicy::initialize_size_info();
// _min_alignment is used for alignment within a generation.
// _space_alignment is used for alignment within a generation.
// There is additional alignment done down stream for some
// collectors that sometimes causes unwanted rounding up of
// generations sizes.
@@ -289,35 +425,8 @@ void GenCollectorPolicy::initialize_size_info() {
// Determine maximum size of gen0
size_t max_new_size = 0;
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
if (MaxNewSize < _min_alignment) {
max_new_size = _min_alignment;
}
if (MaxNewSize >= _max_heap_byte_size) {
max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
_min_alignment);
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
"greater than the entire heap (" SIZE_FORMAT "k). A "
"new generation size of " SIZE_FORMAT "k will be used.",
MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
} else {
max_new_size = align_size_down(MaxNewSize, _min_alignment);
}
// The case for FLAG_IS_ERGO(MaxNewSize) could be treated
// specially at this point to just use an ergonomically set
// MaxNewSize to set max_new_size. For cases with small
// heaps such a policy often did not work because the MaxNewSize
// was larger than the entire heap. The interpretation given
// to ergonomically set flags is that the flags are set
// by different collectors for their own special needs but
// are not allowed to badly shape the heap. This allows the
// different collectors to decide what's best for themselves
// without having to factor in the overall heap shape. It
// can be the case in the future that the collectors would
// only make "wise" ergonomics choices and this policy could
// just accept those choices. The choices currently made are
// not always "wise".
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
max_new_size = MaxNewSize;
} else {
max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
// Bound the maximum size by NewSize below (since it historically
@@ -386,11 +495,22 @@ void GenCollectorPolicy::initialize_size_info() {
_min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
}
// Write back to flags if necessary
if (NewSize != _initial_gen0_size) {
FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
}
if (MaxNewSize != _max_gen0_size) {
FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
// Call this method during the sizing of the gen1 to make
@@ -403,23 +523,18 @@ void GenCollectorPolicy::initialize_size_info() {
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
size_t* gen1_size_ptr,
const size_t heap_size,
const size_t min_gen1_size) {
const size_t heap_size) {
bool result = false;
if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
(heap_size >= min_gen1_size + _min_alignment)) {
// Adjust gen0 down to accommodate min_gen1_size
*gen0_size_ptr = heap_size - min_gen1_size;
*gen0_size_ptr =
MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
assert(*gen0_size_ptr > 0, "Min gen0 is too large");
if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
uintx smallest_new_size = young_gen_size_lower_bound();
if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
(heap_size >= _min_gen1_size + smallest_new_size)) {
// Adjust gen0 down to accommodate _min_gen1_size
*gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
result = true;
} else {
*gen1_size_ptr = heap_size - *gen0_size_ptr;
*gen1_size_ptr =
MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
*gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
}
}
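{
  // Worked example (sketch, assuming a 2m generation alignment): a 96m heap
  // with gen0 = 64m and _min_gen1_size = 48m is 16m over budget; gen0 can
  // still shrink while leaving gen1 its minimum, so it is cut to 48m.
  assert(align_size_down_bounded(96 * M - 48 * M, 2 * M) == 48 * M, "worked example");
}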
return result;
@@ -440,41 +555,36 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since no explicit flag exists
// for setting the gen1 maximum.
_max_gen1_size = _max_heap_byte_size - _max_gen0_size;
_max_gen1_size =
MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
_max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
// If no explicit command line flag has been set for the
// gen1 size, use what is left for gen1.
if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
// The user has not specified any value or ergonomics
// has chosen a value (which may or may not be consistent
if (!FLAG_IS_CMDLINE(OldSize)) {
// The user has not specified any value but the ergonomics
// may have chosen a value (which may or may not be consistent
// with the overall heap size). In either case make
// the minimum, maximum and initial sizes consistent
// with the gen0 sizes and the overall heap sizes.
assert(_min_heap_byte_size > _min_gen0_size,
"gen0 has an unexpected minimum size");
_min_gen1_size = _min_heap_byte_size - _min_gen0_size;
_min_gen1_size =
MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
_initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
_initial_gen1_size =
MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
_min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
_initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
// _max_gen1_size has already been made consistent above
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
} else {
// It's been explicitly set on the command line. Use the
// OldSize and then determine the consequences.
_min_gen1_size = OldSize;
_min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
_initial_gen1_size = OldSize;
// If the user has explicitly set an OldSize that is inconsistent
// with other command line flags, issue a warning.
// The generation minimums and the overall heap minimum should
// be within one heap alignment.
if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
// be within one generation alignment.
if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
warning("Inconsistency between minimum heap size and minimum "
"generation sizes: using minimum heap = " SIZE_FORMAT,
_min_heap_byte_size);
}
if ((OldSize > _max_gen1_size)) {
if (OldSize > _max_gen1_size) {
warning("Inconsistency between maximum heap size and maximum "
"generation sizes: using maximum heap = " SIZE_FORMAT
" -XX:OldSize flag is being ignored",
@@ -482,8 +592,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
}
// If there is an inconsistency between the OldSize and the minimum and/or
// initial size of gen0, since OldSize was explicitly set, OldSize wins.
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
_min_heap_byte_size, OldSize)) {
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
@@ -492,7 +601,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
}
// Initial size
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
_initial_heap_byte_size, OldSize)) {
_initial_heap_byte_size)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
@@ -507,11 +616,26 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
_initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
_initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
// Write back to flags if necessary
if (NewSize != _initial_gen0_size) {
FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
}
if (MaxNewSize != _max_gen0_size) {
FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size);
}
if (OldSize != _initial_gen1_size) {
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
_min_gen1_size, _initial_gen1_size, _max_gen1_size);
}
DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
}
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
@@ -605,9 +729,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
gc_count_before = Universe::heap()->total_collections();
}
VM_GenCollectForAllocation op(size,
is_tlab,
gc_count_before);
VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
VMThread::execute(&op);
if (op.prologue_succeeded()) {
result = op.result();
@@ -836,14 +958,16 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
// MarkSweepPolicy methods
//
MarkSweepPolicy::MarkSweepPolicy() {
initialize_all();
void MarkSweepPolicy::initialize_alignments() {
_space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}
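// A minimal sketch of how a concrete policy is now wired up by its user
// (mirroring Universe::initialize_heap() further down): construction is
// trivial and initialize_all() drives alignments -> flags -> size info ->
// generations in that order.
static void sketch_policy_setup() {
  MarkSweepPolicy* policy = new MarkSweepPolicy();
  policy->initialize_all();
  assert(policy->heap_alignment() % policy->space_alignment() == 0,
         "alignments were in place before the flag and size hooks ran");
}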
void MarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
if (_generations == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
if (UseParNewGC) {
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
@@ -852,8 +976,9 @@ void MarkSweepPolicy::initialize_generations() {
}
_generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);
if (_generations[0] == NULL || _generations[1] == NULL)
if (_generations[0] == NULL || _generations[1] == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
}
void MarkSweepPolicy::initialize_gc_policy_counters() {

View File

@@ -61,17 +61,23 @@ class CollectorPolicy : public CHeapObj<mtGC> {
protected:
GCPolicyCounters* _gc_policy_counters;
// Requires that the concrete subclass sets the alignment constraints
// before calling.
virtual void initialize_alignments() = 0;
virtual void initialize_flags();
virtual void initialize_size_info();
DEBUG_ONLY(virtual void assert_flags();)
DEBUG_ONLY(virtual void assert_size_info();)
size_t _initial_heap_byte_size;
size_t _max_heap_byte_size;
size_t _min_heap_byte_size;
size_t _min_alignment;
size_t _max_alignment;
size_t _space_alignment;
size_t _heap_alignment;
// Needed to keep track of whether MaxHeapSize was set on the command line,
// since the flag value may later be aligned etc. by ergonomics
bool _max_heap_size_cmdline;
// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;
@@ -79,6 +85,7 @@ class CollectorPolicy : public CHeapObj<mtGC> {
// Set to true when policy wants soft refs cleared.
// Reset to false by gc after it clears all soft refs.
bool _should_clear_all_soft_refs;
// Set to true by the GC if the just-completed gc cleared all
// softrefs. This is set to true whenever a gc clears all softrefs, and
// set to false each time gc returns to the mutator. For example, in the
@@ -86,23 +93,20 @@ class CollectorPolicy : public CHeapObj<mtGC> {
// mem_allocate() where it returns op.result()
bool _all_soft_refs_clear;
CollectorPolicy() :
_min_alignment(1),
_max_alignment(1),
_initial_heap_byte_size(0),
_max_heap_byte_size(0),
_min_heap_byte_size(0),
_size_policy(NULL),
_should_clear_all_soft_refs(false),
_all_soft_refs_clear(false)
{}
CollectorPolicy();
public:
// Return maximum heap alignment that may be imposed by the policy
static size_t compute_max_alignment();
virtual void initialize_all() {
initialize_alignments();
initialize_flags();
initialize_size_info();
}
size_t min_alignment() { return _min_alignment; }
size_t max_alignment() { return _max_alignment; }
// Return maximum heap alignment that may be imposed by the policy
static size_t compute_heap_alignment();
size_t space_alignment() { return _space_alignment; }
size_t heap_alignment() { return _heap_alignment; }
size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
size_t max_heap_byte_size() { return _max_heap_byte_size; }
@@ -151,7 +155,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
virtual BarrierSet::Name barrier_set_name() = 0;
virtual GenRemSet::Name rem_set_name() = 0;
// Create the remembered set (to cover the given reserved region,
// allowing breaking up into at most "max_covered_regions").
@@ -195,6 +198,9 @@ class CollectorPolicy : public CHeapObj<mtGC> {
return false;
}
// Do any updates required to global flags that are due to heap initialization
// changes
virtual void post_heap_initialize() = 0;
};
class ClearedAllSoftRefs : public StackObj {
@@ -219,6 +225,10 @@ class GenCollectorPolicy : public CollectorPolicy {
size_t _initial_gen0_size;
size_t _max_gen0_size;
// _gen_alignment and _space_alignment will have the same value most of the
// time. When using large pages they can differ.
size_t _gen_alignment;
GenerationSpec **_generations;
// Return true if an allocation should be attempted in the older
@@ -229,41 +239,50 @@ class GenCollectorPolicy : public CollectorPolicy {
void initialize_flags();
void initialize_size_info();
DEBUG_ONLY(void assert_flags();)
DEBUG_ONLY(void assert_size_info();)
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
// Scale the base_size by NewRation according to
// Compute max heap alignment
size_t compute_max_alignment();
// Scale the base_size by NewRatio according to
// result = base_size / (NewRatio + 1)
// and align by min_alignment()
size_t scale_by_NewRatio_aligned(size_t base_size);
// Bound the value by the given maximum minus the
// min_alignment.
// Bound the value by the given maximum minus the min_alignment
size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
public:
GenCollectorPolicy();
// Accessors
size_t min_gen0_size() { return _min_gen0_size; }
size_t initial_gen0_size() { return _initial_gen0_size; }
size_t max_gen0_size() { return _max_gen0_size; }
size_t gen_alignment() { return _gen_alignment; }
virtual int number_of_generations() = 0;
virtual GenerationSpec **generations() {
virtual GenerationSpec **generations() {
assert(_generations != NULL, "Sanity check");
return _generations;
}
virtual GenCollectorPolicy* as_generation_policy() { return this; }
virtual void initialize_generations() = 0;
virtual void initialize_generations() { };
virtual void initialize_all() {
initialize_flags();
initialize_size_info();
CollectorPolicy::initialize_all();
initialize_generations();
}
size_t young_gen_size_lower_bound();
HeapWord* mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
@@ -274,6 +293,10 @@ class GenCollectorPolicy : public CollectorPolicy {
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
virtual void post_heap_initialize() {
assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info");
}
};
// All of hotspot's current collectors are subtypes of this
@@ -290,9 +313,14 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
void initialize_flags();
void initialize_size_info();
void initialize_generations() { ShouldNotReachHere(); }
DEBUG_ONLY(void assert_flags();)
DEBUG_ONLY(void assert_size_info();)
public:
TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0),
_initial_gen1_size(0), _max_gen1_size(0) {}
// Accessors
size_t min_gen1_size() { return _min_gen1_size; }
size_t initial_gen1_size() { return _initial_gen1_size; }
@@ -301,25 +329,25 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
// Inherited methods
TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
int number_of_generations() { return 2; }
BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
int number_of_generations() { return 2; }
BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::TwoGenerationCollectorPolicyKind;
}
// Returns true is gen0 sizes were adjusted
// Returns true if gen0 sizes were adjusted
bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
const size_t heap_size, const size_t min_gen1_size);
const size_t heap_size);
};
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
protected:
void initialize_alignments();
void initialize_generations();
public:
MarkSweepPolicy();
MarkSweepPolicy() {}
MarkSweepPolicy* as_mark_sweep_policy() { return this; }

View File

@@ -204,7 +204,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
uintx size = _virtual_space.reserved_size();
_max_survivor_size = compute_survivor_size(size, alignment);
_max_eden_size = size - (2*_max_survivor_size);
@@ -235,7 +235,7 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
bool clear_space,
bool mangle_space) {
uintx alignment =
GenCollectedHeap::heap()->collector_policy()->min_alignment();
GenCollectedHeap::heap()->collector_policy()->space_alignment();
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
@@ -473,7 +473,7 @@ size_t DefNewGeneration::free() const {
}
size_t DefNewGeneration::max_capacity() const {
const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
const size_t reserved_bytes = reserved().byte_size();
return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

View File

@@ -111,7 +111,7 @@ jint GenCollectedHeap::initialize() {
int n_covered_regions = 0;
ReservedSpace heap_rs;
size_t heap_alignment = collector_policy()->max_alignment();
size_t heap_alignment = collector_policy()->heap_alignment();
heap_address = allocate(heap_alignment, &total_reserved,
&n_covered_regions, &heap_rs);
@@ -1053,12 +1053,6 @@ void GenCollectedHeap::save_marks() {
}
}
void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
for (int i = 0; i <= collectedGen; i++) {
_gens[i]->compute_new_size();
}
}
GenCollectedHeap* GenCollectedHeap::heap() {
assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");

View File

@@ -86,10 +86,6 @@ public:
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
protected:
// Directs each generation up to and including "collectedGen" to recompute
// its desired size.
void compute_new_generation_sizes(int collectedGen);
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,

View File

@@ -2975,11 +2975,6 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
#endif
// Align down. If the aligning result in 0, return 'alignment'.
static size_t restricted_align_down(size_t size, size_t alignment) {
return MAX2(alignment, align_size_down_(size, alignment));
}
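// The shared helper used below keeps the contract of the helper removed
// above: align down, but never return less than the alignment itself.
// A minimal sketch of that contract:
static void sketch_align_down_bounded_contract() {
  assert(align_size_down_bounded(3 * K, 4 * K) == 4 * K, "bounded below by the alignment");
  assert(align_size_down_bounded(9 * K, 4 * K) == 8 * K, "plain align down otherwise");
}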
void Metaspace::ergo_initialize() {
if (DumpSharedSpaces) {
// Using large pages when dumping the shared archive is currently not implemented.
@@ -3002,13 +2997,13 @@ void Metaspace::ergo_initialize() {
// Ideally, we would be able to set the default value of MaxMetaspaceSize in
// globals.hpp to the aligned value, but this is not possible, since the
// alignment depends on other flags being parsed.
MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
if (MetaspaceSize > MaxMetaspaceSize) {
MetaspaceSize = MaxMetaspaceSize;
}
MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
@@ -3016,10 +3011,10 @@ void Metaspace::ergo_initialize() {
vm_exit_during_initialization("Too small initial Metaspace size");
}
MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
set_compressed_class_space_size(CompressedClassSpaceSize);
}

View File

@@ -247,6 +247,7 @@ void SharedHeap::set_barrier_set(BarrierSet* bs) {
}
void SharedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
}

View File

@@ -785,6 +785,7 @@ jint Universe::initialize_heap() {
} else if (UseG1GC) {
#if INCLUDE_ALL_GCS
G1CollectorPolicy* g1p = new G1CollectorPolicy();
g1p->initialize_all();
G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
Universe::_collectedHeap = g1h;
#else // INCLUDE_ALL_GCS
@@ -809,6 +810,7 @@ jint Universe::initialize_heap() {
} else { // default old generation
gc_policy = new MarkSweepPolicy();
}
gc_policy->initialize_all();
Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
}
@@ -1041,7 +1043,7 @@ bool universe_post_init() {
Universe::_virtual_machine_error_instance =
InstanceKlass::cast(k)->allocate_instance(CHECK_false);
Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
if (!DumpSharedSpaces) {
// These are the only Java fields that are currently set during shared space dumping.

View File

@@ -554,24 +554,37 @@ void ConstantPoolCacheEntry::verify(outputStream* st) const {
// Implementation of ConstantPoolCache
ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
int length,
const intStack& index_map,
const intStack& invokedynamic_index_map,
const intStack& invokedynamic_map, TRAPS) {
const int length = index_map.length() + invokedynamic_index_map.length();
int size = ConstantPoolCache::size(length);
return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
ConstantPoolCache(length, index_map, invokedynamic_map);
ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
}
void ConstantPoolCache::initialize(const intArray& inverse_index_map,
const intArray& invokedynamic_inverse_index_map,
const intArray& invokedynamic_references_map) {
assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
for (int i = 0; i < length(); i++) {
for (int i = 0; i < inverse_index_map.length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
int original_index = inverse_index_map[i];
e->initialize_entry(original_index);
assert(entry_at(i) == e, "sanity");
}
// Append invokedynamic entries at the end
int invokedynamic_offset = inverse_index_map.length();
for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
int offset = i + invokedynamic_offset;
ConstantPoolCacheEntry* e = entry_at(offset);
int original_index = invokedynamic_inverse_index_map[i];
e->initialize_entry(original_index);
assert(entry_at(offset) == e, "sanity");
}
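{
  // Worked example (sketch, hypothetical lengths): with 10 regular entries,
  // invokedynamic entry 3 lands at cache index 10 + 3 = 13; the
  // invokedynamic entries simply extend the regular index space above.
  const int sketch_regular_len = 10, sketch_indy_index = 3;
  assert(sketch_regular_len + sketch_indy_index == 13, "appended after the regular entries");
}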
for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
const int cpci = invokedynamic_references_map[ref];
if (cpci >= 0) {

View File

@@ -31,6 +31,10 @@
class PSPromotionManager;
// The ConstantPoolCache is not a cache! It is the resolution table that the
// interpreter uses to avoid going into the runtime and a way to access resolved
// values.
// A ConstantPoolCacheEntry describes an individual entry of the constant
// pool cache. There are two principal kinds of entries: field entries for
// instance & static field access, and method entries for invokes. Some of
@@ -392,26 +396,33 @@ class ConstantPoolCache: public MetaspaceObj {
friend class MetadataFactory;
private:
int _length;
ConstantPool* _constant_pool; // the corresponding constant pool
ConstantPool* _constant_pool; // the corresponding constant pool
// Sizing
debug_only(friend class ClassVerifier;)
// Constructor
ConstantPoolCache(int length, const intStack& inverse_index_map,
ConstantPoolCache(int length,
const intStack& inverse_index_map,
const intStack& invokedynamic_inverse_index_map,
const intStack& invokedynamic_references_map) :
_length(length), _constant_pool(NULL) {
initialize(inverse_index_map, invokedynamic_references_map);
_length(length),
_constant_pool(NULL) {
initialize(inverse_index_map, invokedynamic_inverse_index_map,
invokedynamic_references_map);
for (int i = 0; i < length; i++) {
assert(entry_at(i)->is_f1_null(), "Failed to clear?");
}
}
// Initialization
void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map);
void initialize(const intArray& inverse_index_map,
const intArray& invokedynamic_inverse_index_map,
const intArray& invokedynamic_references_map);
public:
static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length,
const intStack& inverse_index_map,
static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
const intStack& cp_cache_map,
const intStack& invokedynamic_cp_cache_map,
const intStack& invokedynamic_references_map, TRAPS);
bool is_constantPoolCache() const { return true; }

View File

@@ -2211,6 +2211,10 @@ void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
data = mdo->next_data(data)) {
data->clean_weak_klass_links(is_alive);
}
ParametersTypeData* parameters = mdo->parameters_type_data();
if (parameters != NULL) {
parameters->clean_weak_klass_links(is_alive);
}
}
}
}

View File

@@ -86,7 +86,11 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
*num_new_mirandas = new_mirandas.length();
vtable_length += *num_new_mirandas * vtableEntry::size();
// Interfaces do not need interface methods in their vtables
// This includes miranda methods and, during later processing, default methods
if (!class_flags.is_interface()) {
vtable_length += *num_new_mirandas * vtableEntry::size();
}
if (Universe::is_bootstrapping() && vtable_length == 0) {
// array classes don't have their superclass set correctly during
@@ -224,7 +228,11 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
}
// add miranda methods; it will also return the updated initialized
initialized = fill_in_mirandas(initialized);
// Interfaces do not need interface methods in their vtables
// This includes miranda methods and, during later processing, default methods
if (!ik()->is_interface()) {
initialized = fill_in_mirandas(initialized);
}
// In class hierarchies where the accessibility is not increasing (i.e., going from private ->
// package_private -> public/protected), the vtable might actually be smaller than our initial
@@ -264,12 +272,12 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
_klass->internal_name(), sig, vtable_index);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
if (target_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
}
#endif /*PRODUCT*/
@@ -332,9 +340,15 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
// An interface never allocates new vtable slots, only inherits old ones.
// This method will either be assigned its own itable index later,
// or be assigned an inherited vtable index in the loop below.
// default methods store their vtable indices in the inheritors default_vtable_indices
assert (default_index == -1, "interfaces don't store resolved default methods");
target_method()->set_vtable_index(Method::pending_itable_index);
// default methods inherited by classes store their vtable indices
// in the inheritor's default_vtable_indices
// default methods inherited by interfaces may already have a
// valid itable index; if so, don't change it
// overpass methods in an interface will be assigned an itable index later
// by an inheriting class
if (!is_default || !target_method()->has_itable_index()) {
target_method()->set_vtable_index(Method::pending_itable_index);
}
}
// we need a new entry if there is no superclass
@@ -441,7 +455,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
target_klass->internal_name(), sig, i);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (super_method->is_overpass()) {
tty->print("overpass");
@@ -449,7 +463,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
if (target_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (target_method->is_overpass()) {
tty->print("overpass");
@@ -468,7 +482,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
target_klass->internal_name(), sig,i);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (super_method->is_overpass()) {
tty->print("overpass");
@@ -476,7 +490,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
if (target_method->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (target_method->is_overpass()) {
tty->print("overpass");
@@ -494,8 +508,18 @@ void klassVtable::put_method_at(Method* m, int index) {
#ifndef PRODUCT
if (PrintVtables && Verbose) {
ResourceMark rm;
tty->print_cr("adding %s::%s at index %d", _klass->internal_name(),
(m != NULL) ? m->name()->as_C_string() : "<NULL>", index);
const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
tty->print("adding %s at index %d, flags: ", sig, index);
if (m != NULL) {
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default ");
}
if (m->is_overpass()) {
tty->print("overpass");
}
}
tty->cr();
}
#endif
table()[index].set(m);
@@ -631,8 +655,10 @@ bool klassVtable::is_miranda_entry_at(int i) {
if (mhk->is_interface()) {
assert(m->is_public(), "should be public");
assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
return true;
// the search could find a miranda or a default method
if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
return true;
}
}
return false;
}
@@ -644,9 +670,10 @@ bool klassVtable::is_miranda_entry_at(int i) {
// the caller must make sure that the method belongs to an interface implemented by the class
// Miranda methods only include public interface instance methods
// Not private methods, not static methods, not default == concrete abstract
// Miranda methods also do not include overpass methods in interfaces
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, Klass* super) {
if (m->is_static() || m->is_private()) {
if (m->is_static() || m->is_private() || m->is_overpass()) {
return false;
}
Symbol* name = m->name();
@@ -744,6 +771,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
// Discover miranda methods ("miranda" = "interface abstract, no binding"),
// and append them into the vtable starting at index initialized,
// return the new value of initialized.
// Miranda methods use vtable entries, but do not get assigned a vtable_index
// The vtable_index is discovered by searching from the end of the vtable
int klassVtable::fill_in_mirandas(int initialized) {
GrowableArray<Method*> mirandas(20);
get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
@@ -758,7 +787,7 @@ int klassVtable::fill_in_mirandas(int initialized) {
sig, initialized);
meth->access_flags().print_on(tty);
if (meth->is_default_method()) {
tty->print("default");
tty->print("default ");
}
tty->cr();
}
@@ -858,7 +887,7 @@ void klassVtable::dump_vtable() {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default");
tty->print("default ");
}
if (m->is_overpass()) {
tty->print("overpass");
@@ -977,6 +1006,25 @@ int klassItable::assign_itable_indices_for_interface(Klass* klass) {
if (interface_method_needs_itable_index(m)) {
assert(!m->is_final_method(), "no final interface methods");
// If m is already assigned a vtable index, do not disturb it.
if (TraceItables && Verbose) {
ResourceMark rm;
const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
if (m->has_vtable_index()) {
tty->print("itable index %d for method: %s, flags: ", m->vtable_index(), sig);
} else {
tty->print("itable index %d for method: %s, flags: ", ime_num, sig);
}
if (m != NULL) {
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default ");
}
if (m->is_overpass()) {
tty->print("overpass");
}
}
tty->cr();
}
if (!m->has_vtable_index()) {
assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
m->set_itable_index(ime_num);
@@ -1079,7 +1127,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
tty->print("target_method flags: ");
target()->access_flags().print_on(tty);
if (target()->is_default_method()) {
tty->print("default");
tty->print("default ");
}
tty->cr();
}
@@ -1158,7 +1206,7 @@ void klassItable::dump_itable() {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default");
tty->print("default ");
}
tty->print(" -- ");
m->print_name(tty);

View File

@@ -275,23 +275,23 @@ void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* md
}
bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
return !is_type_none(p) &&
!((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
Klass* k = (Klass*)klass_part(p);
return k != NULL && k->is_loader_alive(is_alive_cl);
}
void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
if (is_loader_alive(is_alive_cl, p)) {
set_type(i, type_none());
if (!is_loader_alive(is_alive_cl, p)) {
set_type(i, with_status((Klass*)NULL, p));
}
}
}
void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
intptr_t p = type();
if (is_loader_alive(is_alive_cl, p)) {
set_type(type_none());
if (!is_loader_alive(is_alive_cl, p)) {
set_type(with_status((Klass*)NULL, p));
}
}
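// A minimal sketch of the corrected predicate above: an entry survives only
// while it records a klass whose loader is still alive; otherwise the
// callers null out the klass bits and keep the status bits via with_status.
static bool sketch_entry_survives(bool has_klass, bool loader_alive) {
  return has_klass && loader_alive;
}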

View File

@@ -690,7 +690,6 @@ public:
// recorded type: cell without bit 0 and 1
static intptr_t klass_part(intptr_t v) {
intptr_t r = v & type_klass_mask;
assert (r != 0, "invalid");
return r;
}
@@ -698,7 +697,9 @@ public:
static Klass* valid_klass(intptr_t k) {
if (!is_type_none(k) &&
!is_type_unknown(k)) {
return (Klass*)klass_part(k);
Klass* res = (Klass*)klass_part(k);
assert(res != NULL, "invalid");
return res;
} else {
return NULL;
}

View File

@@ -389,6 +389,10 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
return false;
}
if (inline_level() > _max_inline_level) {
if (callee_method->force_inline() && inline_level() > MaxForceInlineLevel) {
set_msg("MaxForceInlineLevel");
return false;
}
if (!callee_method->force_inline() || !IncrementalInline) {
set_msg("inlining too deep");
return false;

View File

@@ -776,7 +776,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove
const int vtable_index = Method::invalid_vtable_index;
CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
}
@@ -846,7 +846,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
if (cg != NULL && cg->is_inline())
return cg;
}

View File

@@ -42,6 +42,13 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// so disable this for now
return NULL;
}
if (n->is_MathExact()) {
// MathExact has projections that are not correctly handled in the code
// below.
return NULL;
}
int wins = 0;
assert(!n->is_CFG(), "");
assert(region->is_Region(), "");

View File

@@ -464,17 +464,17 @@ void Matcher::init_first_stack_mask() {
C->FIRST_STACK_mask().Clear();
// Add in the incoming argument area
OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
C->FIRST_STACK_mask().Insert(i);
}
// Add in all bits past the outgoing argument area
guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
"must be able to represent all call arguments in reg mask");
init = _out_arg_limit;
for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
OptoReg::Name init = _out_arg_limit;
for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
C->FIRST_STACK_mask().Insert(i);
}
// Finally, set the "infinite stack" bit.
C->FIRST_STACK_mask().set_AllStack();
@@ -506,16 +506,36 @@ void Matcher::init_first_stack_mask() {
idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
}
if (Matcher::vector_size_supported(T_FLOAT,2)) {
// For VecD we need dual alignment and 8 bytes (2 slots) for spills.
// RA guarantees such alignment since it is needed for Double and Long values.
*idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
}
if (Matcher::vector_size_supported(T_FLOAT,4)) {
// For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
//
// The RA can use input argument stack slots for spills, but until RA runs
// we don't know the frame size or the offsets of the input arg stack slots.
//
// Exclude the last input arg stack slots to avoid spilling vectors there,
// otherwise vector spills could stomp over stack slots in the caller's frame.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
}
if (Matcher::vector_size_supported(T_FLOAT,8)) {
// For VecY we need octo alignment and 32 bytes (8 slots) for spills.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];

View File

@@ -49,7 +49,7 @@ public:
virtual Node* Identity(PhaseTransform* phase) { return this; }
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; }
virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); }
virtual uint hash() const { return Node::hash(); }
virtual uint hash() const { return NO_HASH; }
virtual bool is_CFG() const { return false; }
virtual uint ideal_reg() const { return NotAMachineReg; }

View File

@@ -53,6 +53,8 @@
#include "compiler/compileBroker.hpp"
#include "runtime/compilationPolicy.hpp"
#define SIZE_T_MAX_VALUE ((size_t) -1)
bool WhiteBox::_used = false;
WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -105,10 +107,116 @@ WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
p->min_alignment(), p->max_alignment());
p->space_alignment(), p->heap_alignment());
}
WB_END
#ifndef PRODUCT
// Forward declaration
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
void TestMetaspaceAux_test();
#endif
WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o))
#ifndef PRODUCT
TestReservedSpace_test();
TestReserveMemorySpecial_test();
TestVirtualSpace_test();
TestMetaspaceAux_test();
#endif
WB_END
WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL);
VirtualSpace vs;
vs.initialize(rhs, 50 * granularity);
// Check that the constraints are satisfied
if (!( UseCompressedOops && rhs.base() != NULL &&
Universe::narrow_oop_base() != NULL &&
Universe::narrow_oop_use_implicit_null_checks() )) {
tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n "
"\tUseCompressedOops is %d\n"
"\trhs.base() is "PTR_FORMAT"\n"
"\tUniverse::narrow_oop_base() is "PTR_FORMAT"\n"
"\tUniverse::narrow_oop_use_implicit_null_checks() is %d",
UseCompressedOops,
rhs.base(),
Universe::narrow_oop_base(),
Universe::narrow_oop_use_implicit_null_checks());
return;
}
tty->print_cr("Reading from no access area... ");
tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c",
*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
WB_END
static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
size_t magnitude, size_t iterations) {
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL);
VirtualSpace vs;
if (!vs.initialize(rhs, 0)) {
tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
return 3;
}
long seed = os::random();
tty->print_cr("Random seed is %ld", seed);
os::init_random(seed);
for (size_t i = 0; i < iterations; i++) {
// Whether we will shrink or grow
bool shrink = os::random() % 2L == 0;
// Get random delta to resize virtual space
size_t delta = (size_t)os::random() % magnitude;
// If we are about to shrink virtual space below zero, then expand instead
if (shrink && vs.committed_size() < delta) {
shrink = false;
}
// Resizing by delta
if (shrink) {
vs.shrink_by(delta);
} else {
// If expanding fails expand_by will silently return false
vs.expand_by(delta, true);
}
}
return 0;
}
WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o,
jlong reserved_space_size, jlong magnitude, jlong iterations))
tty->print_cr("reservedSpaceSize="JLONG_FORMAT", magnitude="JLONG_FORMAT", "
"iterations="JLONG_FORMAT"\n", reserved_space_size, magnitude,
iterations);
if (reserved_space_size < 0 || magnitude < 0 || iterations < 0) {
tty->print_cr("One of variables printed above is negative. Can't proceed.\n");
return 1;
}
// sizeof(size_t) depends on whether the OS is 32-bit or 64-bit, while sizeof(jlong) is
// always 8 bytes. That's why we must guard against overflow on 32-bit platforms.
if (sizeof(size_t) < sizeof(jlong)) {
jlong size_t_max_value = (jlong) SIZE_T_MAX_VALUE;
if (reserved_space_size > size_t_max_value || magnitude > size_t_max_value
|| iterations > size_t_max_value) {
tty->print_cr("One of variables printed above overflows size_t. Can't proceed.\n");
return 2;
}
}
return wb_stress_virtual_space_resize((size_t) reserved_space_size,
(size_t) magnitude, (size_t) iterations);
WB_END
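// A minimal sketch of the overflow guard above: on a 32-bit VM size_t is
// four bytes, so a jlong request beyond SIZE_T_MAX_VALUE must be rejected
// before the casts in the call below are attempted.
static bool sketch_fits_in_size_t(jlong value) {
  if (sizeof(size_t) < sizeof(jlong)) {
    return value <= (jlong) SIZE_T_MAX_VALUE;  // 2^32 - 1 on a 32-bit VM
  }
  return true;  // every non-negative jlong fits when size_t is 8 bytes
}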
#if INCLUDE_ALL_GCS
WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -445,6 +553,9 @@ static JNINativeMethod methods[] = {
{CC"getCompressedOopsMaxHeapSize", CC"()J",
(void*)&WB_GetCompressedOopsMaxHeapSize},
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
{CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests},
{CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea},
{CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize},
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },

View File

@ -1132,9 +1132,6 @@ void Arguments::set_tiered_flags() {
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
}
}
#if INCLUDE_ALL_GCS
@ -1408,7 +1405,7 @@ uintx Arguments::max_heap_for_compressed_oops() {
// NULL page is located before the heap, we pad the NULL page to the conservative
// maximum alignment that the GC may ever impose upon the heap.
size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
Arguments::conservative_max_heap_alignment());
_conservative_max_heap_alignment);
LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
NOT_LP64(ShouldNotReachHere(); return 0);
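// Illustrative arithmetic (assumed values, not part of this change): with a 4K
// vm_page_size() and a 2M conservative max heap alignment, the displacement is
// align_size_up_(4K, 2M) == 2M, so on LP64 the limit is OopEncodingHeapMax - 2M
// (32G - 2M with the default 3-bit oop shift).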
@ -1505,7 +1502,7 @@ void Arguments::set_conservative_max_heap_alignment() {
}
#endif // INCLUDE_ALL_GCS
_conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
CollectorPolicy::compute_max_alignment());
CollectorPolicy::compute_heap_alignment());
}
void Arguments::set_ergonomics_flags() {
@ -2165,6 +2162,10 @@ bool Arguments::check_vm_args_consistency() {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
status = status && verify_percentage(G1NewSizePercent, "G1NewSizePercent");
status = status && verify_percentage(G1MaxNewSizePercent, "G1MaxNewSizePercent");
status = status && verify_interval(G1NewSizePercent, 0, G1MaxNewSizePercent, "G1NewSizePercent");
status = status && verify_percentage(InitiatingHeapOccupancyPercent,
"InitiatingHeapOccupancyPercent");
status = status && verify_min_value(G1RefProcDrainInterval, 1,
@ -2681,9 +2682,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
describe_range_error(errcode);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
set_min_heap_size((uintx)long_initial_heap_size);
// Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(InitialHeapSize);
// Can be overridden with -XX:InitialHeapSize.
FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
// -Xmx
} else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
julong long_max_heap_size = 0;
@ -3643,6 +3645,11 @@ jint Arguments::apply_ergo() {
"Incompatible compilation policy selected", NULL);
}
}
// Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
}
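// Worked examples (assumed cache sizes, for illustration only):
//   ReservedCodeCacheSize = 48M  -> NmethodSweepFraction = 1 + 48/16  = 4
//   ReservedCodeCacheSize = 240M -> NmethodSweepFraction = 1 + 240/16 = 16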
// Set heap size based on available physical memory
set_heap_size();

View File

@ -321,6 +321,8 @@ void Flag::print_kind(outputStream* st) {
{ KIND_PRODUCT, "product" },
{ KIND_MANAGEABLE, "manageable" },
{ KIND_DIAGNOSTIC, "diagnostic" },
{ KIND_EXPERIMENTAL, "experimental" },
{ KIND_COMMERCIAL, "commercial" },
{ KIND_NOT_PRODUCT, "notproduct" },
{ KIND_DEVELOP, "develop" },
{ KIND_LP64_PRODUCT, "lp64_product" },

View File

@ -2954,6 +2954,9 @@ class CommandLineFlags {
product(intx, MaxRecursiveInlineLevel, 1, \
"maximum number of nested recursive calls that are inlined") \
\
develop(intx, MaxForceInlineLevel, 100, \
"maximum number of nested @ForceInline calls that are inlined") \
\
product_pd(intx, InlineSmallCode, \
"Only inline already compiled methods if their code size is " \
"less than this") \
@ -3019,9 +3022,6 @@ class CommandLineFlags {
notproduct(intx, ZombieALotInterval, 5, \
"Number of exits until ZombieALot kicks in") \
\
develop(bool, StressNonEntrant, false, \
"Mark nmethods non-entrant at registration") \
\
diagnostic(intx, MallocVerifyInterval, 0, \
"If non-zero, verify C heap after every N calls to " \
"malloc/realloc/free") \
@ -3289,7 +3289,7 @@ class CommandLineFlags {
"Exit the VM if we fill the code cache") \
\
product(bool, UseCodeCacheFlushing, true, \
"Attempt to clean the code cache before shutting off compiler") \
"Remove cold/old nmethods from the code cache") \
\
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
@ -3622,9 +3622,6 @@ class CommandLineFlags {
"Temporary flag for transition to AbstractMethodError wrapped " \
"in InvocationTargetException. See 6531596") \
\
develop(bool, VerifyLambdaBytecodes, false, \
"Force verification of jdk 8 lambda metafactory bytecodes") \
\
develop(intx, FastSuperclassLimit, 8, \
"Depth of hardwired instanceof accelerator array") \
\

View File

@ -470,12 +470,6 @@ bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, boo
return true;
}
// Also allow all accesses from
// java/lang/invoke/MagicLambdaImpl subclasses to succeed trivially.
if (current_class->is_subclass_of(SystemDictionary::lambda_MagicLambdaImpl_klass())) {
return true;
}
return can_relax_access_check_for(current_class, new_class, classloader_only);
}
@ -570,12 +564,6 @@ bool Reflection::verify_field_access(Klass* current_class,
return true;
}
// Also allow all accesses from
// java/lang/invoke/MagicLambdaImpl subclasses to succeed trivially.
if (current_class->is_subclass_of(SystemDictionary::lambda_MagicLambdaImpl_klass())) {
return true;
}
return can_relax_access_check_for(
current_class, field_class, classloader_only);
}

View File

@ -84,6 +84,7 @@
// Shared stub locations
RuntimeStub* SharedRuntime::_wrong_method_blob;
RuntimeStub* SharedRuntime::_wrong_method_abstract_blob;
RuntimeStub* SharedRuntime::_ic_miss_blob;
RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
@ -101,11 +102,12 @@ UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_stubs() {
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
_ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
_wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
_ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
#ifdef COMPILER2
// Vectors are generated only by C2.
@ -1345,6 +1347,11 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
return callee_method->verified_code_entry();
JRT_END
// Handle abstract method call
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
return StubRoutines::throw_AbstractMethodError_entry();
JRT_END
// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
@ -2341,12 +2348,13 @@ void AdapterHandlerLibrary::initialize() {
// Create a special handler for abstract methods. Abstract methods
// are never compiled so an i2c entry is somewhat meaningless, but
// fill it in with something appropriate just in case. Pass handle
// wrong method for the c2i transitions.
address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
// throw AbstractMethodError just in case.
// Pass wrong_method_abstract for the c2i transitions to return
// AbstractMethodError for invalid invocations.
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
_abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
StubRoutines::throw_AbstractMethodError_entry(),
wrong_method, wrong_method);
wrong_method_abstract, wrong_method_abstract);
}
AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,

View File

@ -56,6 +56,7 @@ class SharedRuntime: AllStatic {
// Shared stub locations
static RuntimeStub* _wrong_method_blob;
static RuntimeStub* _wrong_method_abstract_blob;
static RuntimeStub* _ic_miss_blob;
static RuntimeStub* _resolve_opt_virtual_call_blob;
static RuntimeStub* _resolve_virtual_call_blob;
@ -206,6 +207,11 @@ class SharedRuntime: AllStatic {
return _wrong_method_blob->entry_point();
}
static address get_handle_wrong_method_abstract_stub() {
assert(_wrong_method_abstract_blob != NULL, "oops");
return _wrong_method_abstract_blob->entry_point();
}
#ifdef COMPILER2
static void generate_uncommon_trap_blob(void);
static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
@ -481,6 +487,7 @@ class SharedRuntime: AllStatic {
// handle ic miss with caller being compiled code
// wrong method handling (inline cache misses, zombie methods)
static address handle_wrong_method(JavaThread* thread);
static address handle_wrong_method_abstract(JavaThread* thread);
static address handle_wrong_method_ic_miss(JavaThread* thread);
#ifndef PRODUCT

View File

@ -112,14 +112,13 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
if (_records != NULL) {
_records[_sweep_index].traversal = _traversals;
_records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
_records[_sweep_index].invocation = _invocations;
_records[_sweep_index].invocation = _sweep_fractions_left;
_records[_sweep_index].compile_id = nm->compile_id();
_records[_sweep_index].kind = nm->compile_kind();
_records[_sweep_index].state = nm->_state;
_records[_sweep_index].vep = nm->verified_entry_point();
_records[_sweep_index].uep = nm->entry_point();
_records[_sweep_index].line = line;
_sweep_index = (_sweep_index + 1) % SweeperLogEntries;
}
}
@ -127,26 +126,29 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
#define SWEEP(nm)
#endif
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_request_mark_phase = false;
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong NMethodSweeper::_total_time_sweeping = 0;
jlong NMethodSweeper::_total_time_this_sweep = 0;
jlong NMethodSweeper::_peak_sweep_time = 0;
jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
int NMethodSweeper::_hotness_counter_reset_val = 0;
int NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
jlong NMethodSweeper::_total_time_sweeping = 0; // Accumulated time sweeping
jlong NMethodSweeper::_total_time_this_sweep = 0; // Total time this sweep
jlong NMethodSweeper::_peak_sweep_time = 0; // Peak time for a full sweep
jlong NMethodSweeper::_peak_sweep_fraction_time = 0; // Peak time sweeping one fraction
int NMethodSweeper::_hotness_counter_reset_val = 0;
class MarkActivationClosure: public CodeBlobClosure {
@ -197,13 +199,16 @@ void NMethodSweeper::mark_active_nmethods() {
return;
}
// Increase time so that we can estimate when to invoke the sweeper again.
_time_counter++;
// Check for restart
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
if (!sweep_in_progress() && need_marking_phase()) {
_seen = 0;
_invocations = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
_traversals += 1;
if (!sweep_in_progress()) {
_seen = 0;
_sweep_fractions_left = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
_traversals += 1;
_total_time_this_sweep = 0;
if (PrintMethodFlushing) {
@ -211,10 +216,6 @@ void NMethodSweeper::mark_active_nmethods() {
}
Threads::nmethods_do(&mark_activation_closure);
// reset the flags since we started a scan from the beginning.
reset_nmethod_marking();
_locked_seen = 0;
_not_entrant_seen_on_stack = 0;
} else {
// Only set hotness counter
Threads::nmethods_do(&set_hotness_closure);
@ -222,14 +223,49 @@ void NMethodSweeper::mark_active_nmethods() {
OrderAccess::storestore();
}
/**
* This function invokes the sweeper if at least one of the three conditions is met:
* (1) The code cache is getting full
* (2) There have been sufficient state changes since the last sweep.
* (3) We have not been sweeping for 'some time'
*/
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
if (!MethodFlushing || !sweep_in_progress()) {
// Only compiler threads are allowed to sweep
if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
return;
}
if (_invocations > 0) {
// If there was no state change during nmethod sweeping, 'should_sweep' will be false.
// This is one of the two places where should_sweep can be set to true. The general
// idea is as follows: If there is enough free space in the code cache, there is no
// need to invoke the sweeper. The following formula (which determines whether to invoke
// the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
// we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
// the formula considers how much space in the code cache is currently used. Here are
// some examples that will (hopefully) help in understanding.
//
// Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
// the result of the division is 0. This
// keeps the used code cache size small
// (important for embedded Java)
// Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
// computes: (256 / 16) - 1 = 15
// As a result, we invoke the sweeper after
15 invocations of 'mark_active_nmethods'.
// Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula
// computes: (256 / 16) - 10 = 6.
if (!_should_sweep) {
int time_since_last_sweep = _time_counter - _last_sweep;
double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep -
CodeCache::reverse_free_ratio();
if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
_should_sweep = true;
}
}
if (_should_sweep && _sweep_fractions_left > 0) {
// Only one thread at a time will sweep
jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
if (old != 0) {
@ -242,31 +278,46 @@ void NMethodSweeper::possibly_sweep() {
memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
}
#endif
if (_invocations > 0) {
if (_sweep_fractions_left > 0) {
sweep_code_cache();
_invocations--;
_sweep_fractions_left--;
}
// We are done with sweeping the code cache once.
if (_sweep_fractions_left == 0) {
_last_sweep = _time_counter;
// Reset flag; temporarily disables sweeper
_should_sweep = false;
// If there was enough state change, 'possibly_enable_sweeper()'
// sets '_should_sweep' to true
possibly_enable_sweeper();
// Reset _bytes_changed only if there was enough state change. _bytes_changed
// can further increase by calls to 'report_state_change'.
if (_should_sweep) {
_bytes_changed = 0;
}
}
_sweep_started = 0;
}
}
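The wait formula documented above can be exercised in isolation. A minimal sketch (plain C++, not sweeper code); the reverse_free_ratio values are assumptions picked to reproduce the two 256M examples in the comment (roughly max_capacity / unallocated_capacity, i.e. about 1.1 at 10% full and 10 at 90% full):

#include <cstddef>
#include <cstdio>

// Fractions of 'mark_active_nmethods' left to wait before sweeping again;
// a result <= 0 means the sweeper should be invoked now.
static double wait_until_next_sweep(size_t reserved_code_cache,
                                    long time_since_last_sweep,
                                    double reverse_free_ratio) {
  const size_t M = 1024 * 1024;
  return (double)(reserved_code_cache / (16 * M))
         - time_since_last_sweep - reverse_free_ratio;
}

int main() {
  const size_t M = 1024 * 1024;
  // 256M cache, 10% full, just swept: (256/16) - 0 - ~1.1 => ~15 fractions to wait
  printf("%.1f\n", wait_until_next_sweep(256 * M, 0, 1.1));
  // 256M cache, 90% full: (256/16) - 0 - 10 => 6 fractions to wait
  printf("%.1f\n", wait_until_next_sweep(256 * M, 0, 10.0));
  // Small cache (< 16M): the integer division yields 0, so we sweep every time
  printf("%.1f\n", wait_until_next_sweep(8 * M, 0, 1.1));
  return 0;
}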
void NMethodSweeper::sweep_code_cache() {
jlong sweep_start_counter = os::elapsed_counter();
_flushed_count = 0;
_zombified_count = 0;
_marked_count = 0;
_flushed_count = 0;
_zombified_count = 0;
_marked_for_reclamation_count = 0;
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
if (!CompileBroker::should_compile_new_jobs()) {
// If we have turned off compilations we might as well do full sweeps
// in order to reach the clean state faster. Otherwise the sleeping compiler
// threads will slow down sweeping.
_invocations = 1;
_sweep_fractions_left = 1;
}
// We want to visit all nmethods after NmethodSweepFraction
@ -274,7 +325,7 @@ void NMethodSweeper::sweep_code_cache() {
// remaining number of invocations. This is only an estimate since
// the number of nmethods changes during the sweep so the final
// stage must iterate until there are no more nmethods.
int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
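// Illustrative: 3000 nmethods in the cache, _seen == 0, 16 fractions left
// => todo == 187 nmethods per fraction (the remainder is picked up by the
// final fraction, which iterates until _current == NULL).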
int swept_count = 0;
@ -286,11 +337,11 @@ void NMethodSweeper::sweep_code_cache() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The last invocation iterates until there are no more nmethods
for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -314,19 +365,7 @@ void NMethodSweeper::sweep_code_cache() {
}
}
assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
// locked or were still on stack. We don't have to aggressively
// clean them up so just stop scanning. We could scan once more
// but that complicates the control logic and it's unlikely to
// matter much.
if (PrintMethodFlushing) {
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
}
}
assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
jlong sweep_end_counter = os::elapsed_counter();
jlong sweep_time = sweep_end_counter - sweep_start_counter;
@ -340,21 +379,21 @@ void NMethodSweeper::sweep_code_cache() {
event.set_starttime(sweep_start_counter);
event.set_endtime(sweep_end_counter);
event.set_sweepIndex(_traversals);
event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
event.set_sweptCount(swept_count);
event.set_flushedCount(_flushed_count);
event.set_markedCount(_marked_count);
event.set_markedCount(_marked_for_reclamation_count);
event.set_zombifiedCount(_zombified_count);
event.commit();
}
#ifdef ASSERT
if(PrintMethodFlushing) {
tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time);
}
#endif
if (_invocations == 1) {
if (_sweep_fractions_left == 1) {
_peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
}
@ -368,12 +407,37 @@ void NMethodSweeper::sweep_code_cache() {
// it only makes sense to re-enable compilation if we have actually freed memory.
// Note that typically several kB are released for sweeping 16MB of the code
// cache. As a result, we require 'freed_memory' > 0 before restarting the compiler.
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_sweep("restart_compiler");
}
}
/**
* This function updates the sweeper statistics that keep track of nmethods
* state changes. If there is 'enough' state change, the sweeper is invoked
* as soon as possible. There can be data races on _bytes_changed. The data
* races are benign, since it does not matter if we lose a couple of bytes.
* In the worst case we call the sweeper a little later. Also, we are guaranteed
* to invoke the sweeper if the code cache gets full.
*/
void NMethodSweeper::report_state_change(nmethod* nm) {
_bytes_changed += nm->total_size();
possibly_enable_sweeper();
}
/**
* Function determines if there was 'enough' state change in the code cache to invoke
* the sweeper again. Currently, we determine 'enough' as more than 1% state change in
* the code cache since the last sweep.
*/
void NMethodSweeper::possibly_enable_sweeper() {
double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
if (percent_changed > 1.0) {
_should_sweep = true;
}
}
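A quick numeric check of the 1% rule (plain C++ sketch; the 48M cache size is an assumption for illustration, not a value taken from this change):

#include <cstdio>

int main() {
  const double reserved_code_cache = 48.0 * 1024 * 1024; // assumed ReservedCodeCacheSize
  const double bytes_changed = 600.0 * 1024;             // e.g. 600K of nmethod state change
  double percent_changed = bytes_changed / reserved_code_cache * 100.0;
  // 600K / 48M is about 1.22% > 1.0%, so the sweeper would be re-enabled
  printf("changed %.2f%% -> should_sweep = %s\n",
         percent_changed, percent_changed > 1.0 ? "true" : "false");
  return 0;
}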
class NMethodMarker: public StackObj {
private:
CompilerThread* _thread;
@ -424,9 +488,6 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
} else {
_locked_seen++;
SWEEP(nm);
}
return freed_memory;
}
@ -448,8 +509,9 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
request_nmethod_marking();
_marked_count++;
// Keep track of code cache state change
_bytes_changed += nm->total_size();
_marked_for_reclamation_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
@ -459,18 +521,14 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
} else {
// Still alive, clean up its inline caches
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
// we couldn't transition this nmethod so don't immediately
// request a rescan. If this method stays on the stack for a
// long time we don't want to keep rescanning the code cache.
_not_entrant_seen_on_stack++;
SWEEP(nm);
}
} else if (nm->is_unloaded()) {
@ -485,8 +543,8 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
release_nmethod(nm);
_flushed_count++;
} else {
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
}
@ -514,7 +572,11 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
// The second condition ensures that methods are not immediately made not-entrant
// after compilation.
nm->make_not_entrant();
request_nmethod_marking();
// Code cache state change is tracked in make_not_entrant()
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
}
}
}
}

View File

@ -53,22 +53,22 @@
// is full.
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
static nmethod* _current; // Current nmethod
static int _seen; // Nof. nmethods we have currently processed in current pass of CodeCache
static int _flushed_count; // Nof. nmethods flushed in current sweep
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_count; // Nof. nmethods marked for reclaim in current sweep
static volatile int _invocations; // No. of invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
// The following are reset in mark_active_nmethods and synchronized by the safepoint
static bool _request_mark_phase; // Indicates that a change has happened and we need another mark phase,
// always checked and reset at a safepoint so memory will be in sync.
static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
static long _traversals; // Stack scan count, also sweep ID.
static long _time_counter; // Virtual time used to periodically invoke sweeper
static long _last_sweep; // Value of _time_counter when the last sweep happened
static nmethod* _current; // Current nmethod
static int _seen; // Nof. nmethods we have currently processed in current pass of CodeCache
static int _flushed_count; // Nof. nmethods flushed in current sweep
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
// Stat counters
static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static jlong _total_time_sweeping; // Accumulated time sweeping
@ -81,9 +81,6 @@ class NMethodSweeper : public AllStatic {
static bool sweep_in_progress();
static void sweep_code_cache();
static void request_nmethod_marking() { _request_mark_phase = true; }
static void reset_nmethod_marking() { _request_mark_phase = false; }
static bool need_marking_phase() { return _request_mark_phase; }
static int _hotness_counter_reset_val;
@ -109,13 +106,8 @@ class NMethodSweeper : public AllStatic {
static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
static int hotness_counter_reset_val();
static void notify() {
// Request a new sweep of the code cache from the beginning. No
// need to synchronize the setting of this flag since it only
// changes to false at safepoint so we can never overwrite it with false.
request_nmethod_marking();
}
static void report_state_change(nmethod* nm);
static void possibly_enable_sweeper();
};
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP

View File

@ -200,6 +200,12 @@ void ThreadService::oops_do(OopClosure* f) {
}
}
void ThreadService::metadata_do(void f(Metadata*)) {
for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
dump->metadata_do(f);
}
}
void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
MutexLocker ml(Management_lock);
if (_threaddump_list == NULL) {
@ -451,9 +457,16 @@ void ThreadDumpResult::oops_do(OopClosure* f) {
}
}
void ThreadDumpResult::metadata_do(void f(Metadata*)) {
for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
ts->metadata_do(f);
}
}
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
_method = jvf->method();
_bci = jvf->bci();
_class_holder = _method->method_holder()->klass_holder();
_locked_monitors = NULL;
if (with_lock_info) {
ResourceMark rm;
@ -477,6 +490,11 @@ void StackFrameInfo::oops_do(OopClosure* f) {
f->do_oop((oop*) _locked_monitors->adr_at(i));
}
}
f->do_oop(&_class_holder);
}
void StackFrameInfo::metadata_do(void f(Metadata*)) {
f(_method);
}
void StackFrameInfo::print_on(outputStream* st) const {
@ -620,6 +638,14 @@ void ThreadStackTrace::oops_do(OopClosure* f) {
}
}
void ThreadStackTrace::metadata_do(void f(Metadata*)) {
int length = _frames->length();
for (int i = 0; i < length; i++) {
_frames->at(i)->metadata_do(f);
}
}
ConcurrentLocksDump::~ConcurrentLocksDump() {
if (_retain_map_on_free) {
return;
@ -823,6 +849,13 @@ void ThreadSnapshot::oops_do(OopClosure* f) {
}
}
void ThreadSnapshot::metadata_do(void f(Metadata*)) {
if (_stack_trace != NULL) {
_stack_trace->metadata_do(f);
}
}
DeadlockCycle::DeadlockCycle() {
_is_deadlock = false;
_threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);

View File

@ -113,6 +113,7 @@ public:
// GC support
static void oops_do(OopClosure* f);
static void metadata_do(void f(Metadata*));
};
// Per-thread Statistics for synchronization
@ -242,6 +243,7 @@ public:
void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors);
void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
};
class ThreadStackTrace : public CHeapObj<mtInternal> {
@ -265,6 +267,7 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
void dump_stack_at_safepoint(int max_depth);
Handle allocate_fill_stack_trace_element_array(TRAPS);
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
GrowableArray<oop>* jni_locked_monitors() { return _jni_locked_monitors; }
int num_jni_locked_monitors() { return (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); }
@ -280,6 +283,9 @@ class StackFrameInfo : public CHeapObj<mtInternal> {
Method* _method;
int _bci;
GrowableArray<oop>* _locked_monitors; // list of object monitors locked by this frame
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
oop _class_holder;
public:
@ -289,9 +295,10 @@ class StackFrameInfo : public CHeapObj<mtInternal> {
delete _locked_monitors;
}
};
Method* method() const { return _method; }
Method* method() const { return _method; }
int bci() const { return _bci; }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
int num_locked_monitors() { return (_locked_monitors != NULL ? _locked_monitors->length() : 0); }
GrowableArray<oop>* locked_monitors() { return _locked_monitors; }
@ -354,6 +361,7 @@ class ThreadDumpResult : public StackObj {
int num_snapshots() { return _num_snapshots; }
ThreadSnapshot* snapshots() { return _snapshots; }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
};
class DeadlockCycle : public CHeapObj<mtInternal> {

View File

@ -456,6 +456,13 @@ inline void* align_pointer_up(const void* addr, size_t size) {
return (void*) align_size_up_((uintptr_t)addr, size);
}
// Align down with a lower bound. If the aligning results in 0, return 'alignment'.
inline size_t align_size_down_bounded(size_t size, size_t alignment) {
size_t aligned_size = align_size_down_(size, alignment);
return aligned_size > 0 ? aligned_size : alignment;
}
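// Usage sketch (values assumed for illustration; align_size_down_ rounds down
// to a multiple of 'alignment'):
//   align_size_down_bounded(5000, 4096) == 4096  // 5000 rounds down to 4096
//   align_size_down_bounded(1000, 4096) == 4096  // rounds down to 0, bounded to alignment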
// Clamp an address to be within a specific page
// 1. If addr is on the page it is returned as is
// 2. If addr is above the page_address the start of the *next* page will be returned

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8028207
* @summary Verify that GVN doesn't mess up the two addExacts
* @compile GVNTest.java
* @run main GVNTest
*
*/
public class GVNTest {
public static int result = 0;
public static int value = 93;
public static void main(String[] args) {
for (int i = 0; i < 50000; ++i) {
result = runTest(value + i);
result = runTest(value + i);
result = runTest(value + i);
result = runTest(value + i);
result = runTest(value + i);
}
}
public static int runTest(int value) {
int v = value + value;
int sum = 0;
if (v < 4032) {
for (int i = 0; i < 1023; ++i) {
sum += Math.addExact(value, value);
}
} else {
for (int i = 0; i < 321; ++i) {
sum += Math.addExact(value, value);
}
}
return sum + v;
}
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8028198
* @summary Verify that split through phi does the right thing
* @compile SplitThruPhiTest.java
* @run main SplitThruPhiTest
*
*/
public class SplitThruPhiTest {
public static volatile int value = 19;
public static int store = 0;
public static void main(String[] args) {
for (int i = 0; i < 150000; ++i) {
store = runTest(value);
}
}
public static int runTest(int val) {
int result = Math.addExact(val, 1);
int total = 0;
for (int i = val; i < 200; i = Math.addExact(i, 1)) {
total += i;
}
return total;
}
}

View File

@ -172,7 +172,6 @@ public class ConcurrentClassLoadingTest {
"java.lang.invoke.LambdaConversionException",
"java.lang.invoke.LambdaForm",
"java.lang.invoke.LambdaMetafactory",
"java.lang.invoke.MagicLambdaImpl",
"java.lang.invoke.MemberName",
"java.lang.invoke.MethodHandle",
"java.lang.invoke.MethodHandleImpl",

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8027631
* @summary profiling of arguments at calls cannot rely on signature of callee for types
* @run main/othervm -XX:-BackgroundCompilation -XX:TieredStopAtLevel=3 -XX:TypeProfileLevel=111 -XX:Tier3InvocationThreshold=200 -XX:Tier0InvokeNotifyFreqLog=7 TestUnexpectedProfilingMismatch
*
*/
import java.lang.invoke.*;
public class TestUnexpectedProfilingMismatch {
static class A {
}
static class B {
}
static void mA(A a) {
}
static void mB(B b) {
}
static final MethodHandle mhA;
static final MethodHandle mhB;
static {
MethodHandles.Lookup lookup = MethodHandles.lookup();
MethodType mt = MethodType.methodType(void.class, A.class);
MethodHandle res = null;
try {
res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mA", mt);
} catch(NoSuchMethodException ex) {
} catch(IllegalAccessException ex) {
}
mhA = res;
mt = MethodType.methodType(void.class, B.class);
try {
res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mB", mt);
} catch(NoSuchMethodException ex) {
} catch(IllegalAccessException ex) {
}
mhB = res;
}
void m1(A a, boolean doit) throws Throwable {
if (doit) {
mhA.invoke(a);
}
}
void m2(B b) throws Throwable {
mhB.invoke(b);
}
static public void main(String[] args) {
TestUnexpectedProfilingMismatch tih = new TestUnexpectedProfilingMismatch();
A a = new A();
B b = new B();
try {
for (int i = 0; i < 256 - 1; i++) {
tih.m1(a, true);
}
// Will trigger the compilation but will also run once
// more interpreted with a non null MDO which it will
// update. Make it skip the body of the method.
tih.m1(a, false);
// Compile this one as well and do the profiling
for (int i = 0; i < 256; i++) {
tih.m2(b);
}
// Will run and see a conflict
tih.m1(a, true);
} catch(Throwable ex) {
ex.printStackTrace();
}
System.out.println("TEST PASSED");
}
}

View File

@ -21,8 +21,5 @@
* questions.
*/
// key: compiler.err.intf.or.array.expected.here
import java.util.List;
class InterfaceExpected<T extends List & String> { }
public class B {
}

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8027572
* @summary class unloading resets profile, method compiled after the profile is first set and before class loading sets unknown bit with not recorded class
* @build B
* @run main/othervm -XX:TypeProfileLevel=222 -XX:-BackgroundCompilation TestProfileConflictClassUnloading
*
*/
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Paths;
public class TestProfileConflictClassUnloading {
static class A {
}
static void m1(Object o) {
}
static void m2(Object o) {
m1(o);
}
static void m3(A a, boolean do_call) {
if (!do_call) {
return;
}
m2(a);
}
public static ClassLoader newClassLoader() {
try {
return new URLClassLoader(new URL[] {
Paths.get(System.getProperty("test.classes",".")).toUri().toURL(),
}, null);
} catch (MalformedURLException e){
throw new RuntimeException("Unexpected URL conversion failure", e);
}
}
public static void main(String[] args) throws Exception {
ClassLoader loader = newClassLoader();
Object o = loader.loadClass("B").newInstance();
// collect conflicting profiles
for (int i = 0; i < 5000; i++) {
m2(o);
}
// prepare for conflict
A a = new A();
for (int i = 0; i < 5000; i++) {
m3(a, false);
}
// unload class in profile
o = null;
loader = null;
System.gc();
// record the conflict
m3(a, true);
// trigger another GC
System.gc();
}
}

View File

@ -0,0 +1,294 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8028308
* @summary rbp not restored when stack overflow is thrown from deopt/uncommon trap blobs
* @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,TestStackBangRbp::m1 -XX:CompileCommand=exclude,TestStackBangRbp::m2 -Xss256K -XX:-UseOnStackReplacement TestStackBangRbp
*
*/
public class TestStackBangRbp {
static class UnloadedClass1 {
}
static class UnloadedClass2 {
}
static Object m1(boolean deopt) {
long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
l508, l509, l510, l511;
long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
ll508, ll509, ll510, ll511;
int i1 = TestStackBangRbp.i1;
int i2 = TestStackBangRbp.i2;
int i3 = TestStackBangRbp.i3;
int i4 = TestStackBangRbp.i4;
int i5 = TestStackBangRbp.i5;
int i6 = TestStackBangRbp.i6;
int i7 = TestStackBangRbp.i7;
int i8 = TestStackBangRbp.i8;
int i9 = TestStackBangRbp.i9;
int i10 = TestStackBangRbp.i10;
int i11 = TestStackBangRbp.i11;
int i12 = TestStackBangRbp.i12;
int i13 = TestStackBangRbp.i13;
int i14 = TestStackBangRbp.i14;
int i15 = TestStackBangRbp.i15;
int i16 = TestStackBangRbp.i16;
TestStackBangRbp.i1 = i1;
TestStackBangRbp.i2 = i2;
TestStackBangRbp.i3 = i3;
TestStackBangRbp.i4 = i4;
TestStackBangRbp.i5 = i5;
TestStackBangRbp.i6 = i6;
TestStackBangRbp.i7 = i7;
TestStackBangRbp.i8 = i8;
TestStackBangRbp.i9 = i9;
TestStackBangRbp.i10 = i10;
TestStackBangRbp.i11 = i11;
TestStackBangRbp.i12 = i12;
TestStackBangRbp.i13 = i13;
TestStackBangRbp.i14 = i14;
TestStackBangRbp.i15 = i15;
TestStackBangRbp.i16 = i16;
if (deopt) {
// deoptimize with integer in rbp
UnloadedClass1 res = new UnloadedClass1(); // forces deopt with c2
return res;
}
return null;
}
static boolean m2(boolean deopt) {
// call m2 recursively until stack overflow. Then call m3 that
// will call m1 and trigger a deopt in m1 while keeping a
// lot of objects live in registers at the call to m1.
long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
l508, l509, l510, l511;
boolean do_m3 = false;
try {
do_m3 = m2(deopt);
} catch (StackOverflowError e) {
return true;
}
if (do_m3) {
m3(deopt);
}
return false;
}
static volatile Object o1 = new Object();
static volatile int i1 = 1;
static volatile int i2 = 2;
static volatile int i3 = 3;
static volatile int i4 = 4;
static volatile int i5 = 5;
static volatile int i6 = 6;
static volatile int i7 = 7;
static volatile int i8 = 8;
static volatile int i9 = 9;
static volatile int i10 = 10;
static volatile int i11 = 11;
static volatile int i12 = 12;
static volatile int i13 = 13;
static volatile int i14 = 14;
static volatile int i15 = 15;
static volatile int i16 = 16;
static void m3(boolean deopt) {
Object o1 = TestStackBangRbp.o1;
TestStackBangRbp.o1 = o1;
try {
m1(deopt);
} catch (StackOverflowError e) {
// deoptimize again. rbp holds an integer. It should have an object.
UnloadedClass2 res = new UnloadedClass2(); // forces deopt with c2
}
TestStackBangRbp.o1 = o1;
}
static public void main(String[] args) {
// get m1 & m3 compiled
for (int i = 0; i < 20000; i++) {
m1(false);
m3(false);
}
m2(true);
System.out.println("TEST PASSED");
}
}

View File

@ -64,32 +64,29 @@ class TestMaxHeapSizeTools {
long newPlusOldSize = values[0] + values[1];
long smallValue = newPlusOldSize / 2;
long largeValue = newPlusOldSize * 2;
long maxHeapSize = largeValue + (2 * 1024 * 1024);
// -Xms is not set
checkErgonomics(new String[] { gcflag, "-Xmx16M" }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-XX:InitialHeapSize=0" }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=" + smallValue }, values, -1, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-XX:InitialHeapSize=0" }, values, -1, -1);
// -Xms is set to zero
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0" }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms0", "-XX:InitialHeapSize=0" }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0" }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=" + smallValue }, values, -1, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=" + largeValue }, values, -1, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms0", "-XX:InitialHeapSize=0" }, values, -1, -1);
// -Xms is set to small value
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=" + largeValue }, values, smallValue, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + smallValue, "-XX:InitialHeapSize=0" }, values, smallValue, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue }, values, -1, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=" + largeValue }, values, smallValue, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + smallValue, "-XX:InitialHeapSize=0" }, values, smallValue, -1);
// -Xms is set to large value
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue }, values, largeValue, largeValue);
// the next case has already been checked elsewhere and gives an error
// checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=" + smallValue }, values, smallValue, smallValue);
// the next case has already been checked elsewhere too
// checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=" + largeValue }, values, values[0], largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx16M", "-Xms" + largeValue, "-XX:InitialHeapSize=0" }, values, largeValue, -1);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + largeValue }, values, largeValue, largeValue);
checkErgonomics(new String[] { gcflag, "-Xmx" + maxHeapSize, "-Xms" + largeValue, "-XX:InitialHeapSize=0" }, values, largeValue, -1);
}
private static long align_up(long value, long alignment) {
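The hunk above ends at align_up, whose body lies outside the shown context and is left as-is. For orientation only, a typical power-of-two alignment helper looks like the following sketch (an assumption, not the committed implementation):

    private static long align_up(long value, long alignment) {
        // round value up to the next multiple of alignment (alignment must be a power of two)
        return (value + alignment - 1) & ~(alignment - 1);
    }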

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestMaxNewSize
* @key gc
* @bug 7057939
* @summary Make sure that MaxNewSize always has a useful value after argument
* processing.
* @library /testlibrary
* @build TestMaxNewSize
* @run main TestMaxNewSize -XX:+UseSerialGC
* @run main TestMaxNewSize -XX:+UseParallelGC
* @run main TestMaxNewSize -XX:+UseConcMarkSweepGC
* @run main TestMaxNewSize -XX:+UseG1GC
* @author thomas.schatzl@oracle.com, jesper.wilhelmsson@oracle.com
*/
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import com.oracle.java.testlibrary.*;
public class TestMaxNewSize {
private static void checkMaxNewSize(String[] flags, int heapsize) throws Exception {
BigInteger actual = new BigInteger(getMaxNewSize(flags));
System.out.println(actual);
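// compareTo is positive only when actual > heapsize, i.e. the reported
// MaxNewSize exceeds the expected upper bound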
if (actual.compareTo(BigInteger.valueOf(heapsize)) > 0) {
throw new RuntimeException("MaxNewSize value set to \"" + actual +
"\", expected otherwise when running with the following flags: " + Arrays.asList(flags).toString());
}
}
private static void checkIncompatibleNewSize(String[] flags) throws Exception {
ArrayList<String> finalargs = new ArrayList<String>();
finalargs.addAll(Arrays.asList(flags));
finalargs.add("-version");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Initial young gen size set larger than the maximum young gen size");
}
private static boolean isRunningG1(String[] args) {
for (int i = 0; i < args.length; i++) {
if (args[i].contains("+UseG1GC")) {
return true;
}
}
return false;
}
private static String getMaxNewSize(String[] flags) throws Exception {
ArrayList<String> finalargs = new ArrayList<String>();
finalargs.addAll(Arrays.asList(flags));
if (isRunningG1(flags)) {
finalargs.add("-XX:G1HeapRegionSize=1M");
}
finalargs.add("-XX:+PrintFlagsFinal");
finalargs.add("-version");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldHaveExitValue(0);
String stdout = output.getStdout();
//System.out.println(stdout);
return getFlagValue("MaxNewSize", stdout);
}
private static String getFlagValue(String flag, String where) {
Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where);
if (!m.find()) {
throw new RuntimeException("Could not find value for flag " + flag + " in output string");
}
String match = m.group();
return match.substring(match.lastIndexOf(" ") + 1, match.length());
}
public static void main(String args[]) throws Exception {
String gcName = args[0];
final int M32 = 32 * 1024 * 1024;
final int M64 = 64 * 1024 * 1024;
final int M96 = 96 * 1024 * 1024;
final int M128 = 128 * 1024 * 1024;
checkMaxNewSize(new String[] { gcName, "-Xmx128M" }, M128);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewRatio=5" }, M128);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewSize=32M" }, M128);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:OldSize=96M" }, M128);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:MaxNewSize=32M" }, M32);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewSize=32M", "-XX:MaxNewSize=32M" }, M32);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-XX:NewRatio=6", "-XX:MaxNewSize=32M" }, M32);
checkMaxNewSize(new String[] { gcName, "-Xmx128M", "-Xms96M" }, M128);
checkMaxNewSize(new String[] { gcName, "-Xmx96M", "-Xms96M" }, M96);
checkMaxNewSize(new String[] { gcName, "-XX:NewSize=128M", "-XX:MaxNewSize=50M"}, M128);
}
}
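For reference, getFlagValue above parses -XX:+PrintFlagsFinal output lines of roughly this shape (illustrative values; exact spacing varies by build), where ":=" marks a flag changed from its default and "=" a default value; the ":?=" in the regex accepts both:

    uintx MaxNewSize        := 87031808        {product}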

View File

@ -0,0 +1,143 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @key regression
* @key gc
* @bug 8027756
* @library /testlibrary /testlibrary/whitebox
* @build TestHumongousCodeCacheRoots
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @summary Humongous objects may have references from the code cache
* @run main TestHumongousCodeCacheRoots
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
import java.util.ArrayList;
import java.util.Arrays;
class TestHumongousCodeCacheRootsHelper {
static final int n = 1000000;
static final int[] AA = new int[n];
static final int[] BB = new int[n];
public static void main(String args[]) throws Exception {
// do some work so that the compiler compiles this method, inlining the
// references to the integer arrays (which are humongous objects) into
// the code cache.
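// (with the 1M G1 region size set by the driver class below, an object is
// humongous once it reaches half a region, i.e. 512KB; an int[1000000] is
// roughly 3.8MB, so AA and BB both qualify)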
for(int i = 0; i < n; i++) {
AA[i] = 0;
BB[i] = 0;
}
// trigger a GC that checks that the verification code allows humongous
// objects with code cache roots; all objects should be live here.
System.gc();
// deoptimize everything: this should make all compiled code zombies.
WhiteBox wb = WhiteBox.getWhiteBox();
wb.deoptimizeAll();
// trigger a GC that checks that the verification code allows humongous
// objects with code cache roots; all objects should be live here.
System.gc();
// wait a little for the code cache sweeper to try to clean up zombie nmethods
// and unregister the code roots.
try { Thread.sleep(5000); } catch (InterruptedException ex) { }
// do some work on the arrays to make sure that they must remain live after the GCs
for(int i = 0; i < n; i++) {
AA[i] = 1;
BB[i] = 10;
}
System.out.println();
}
}
public class TestHumongousCodeCacheRoots {
/**
* Executes a class in a new VM process with the given parameters.
* @param vmargs Arguments to the VM to run
* @param classname Name of the class to run
* @param arguments Arguments to the class
* @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
* @return The OutputAnalyzer with the results for the invocation.
*/
public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
ArrayList<String> finalargs = new ArrayList<String>();
String[] whiteboxOpts = new String[] {
"-Xbootclasspath/a:.",
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
"-cp", System.getProperty("java.class.path"),
};
if (useTestDotJavaDotOpts) {
// System.getProperty("test.java.opts") is '' if no options is set,
// we need to skip such a result
String[] externalVMOpts = new String[0];
if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
externalVMOpts = System.getProperty("test.java.opts").split(" ");
}
finalargs.addAll(Arrays.asList(externalVMOpts));
}
finalargs.addAll(Arrays.asList(vmargs));
finalargs.addAll(Arrays.asList(whiteboxOpts));
finalargs.add(classname);
finalargs.addAll(Arrays.asList(arguments));
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldHaveExitValue(0);
return output;
}
public static void runTest(String compiler, String[] other) throws Exception {
ArrayList<String> joined = new ArrayList<String>();
joined.add(compiler);
joined.addAll(Arrays.asList(other));
runWhiteBoxTest(joined.toArray(new String[0]), TestHumongousCodeCacheRootsHelper.class.getName(),
new String[] {}, false);
}
public static void main(String[] args) throws Exception {
final String[] baseArguments = new String[] {
"-XX:+UseG1GC", "-XX:G1HeapRegionSize=1M", "-Xmx100M", // make sure we get a humongous region
"-XX:+UnlockDiagnosticVMOptions",
"-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking
"-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run
"-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1", // make the code cache sweep more predictable
};
runTest("-client", baseArguments);
runTest("-server", baseArguments);
}
}
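main above exercises runWhiteBoxTest only with useTestDotJavaDotOpts set to false; a hypothetical caller that also forwards the jtreg-supplied test.java.opts to the child VM might look like this sketch:

    // hypothetical invocation, not part of the commit:
    runWhiteBoxTest(new String[] { "-XX:+UseG1GC" },
                    TestHumongousCodeCacheRootsHelper.class.getName(),
                    new String[] {}, true);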

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test that touching noaccess area in class ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION
* @library /testlibrary /testlibrary/whitebox
* @build ReadFromNoaccessArea
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main ReadFromNoaccessArea
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class ReadFromNoaccessArea {
public static void main(String args[]) throws Exception {
if (!Platform.is64bit()) {
System.out.println("ReadFromNoaccessArea tests is useful only on 64bit architecture. Passing silently.");
return;
}
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-Xbootclasspath/a:.",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:+UseCompressedOops",
"-XX:HeapBaseMinAddress=33G",
DummyClassWithMainTryingToReadFromNoaccessArea.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
System.out.println("******* Printing stdout for analysis in case of failure *******");
System.out.println(output.getStdout());
System.out.println("******* Printing stderr for analysis in case of failure *******");
System.out.println(output.getStderr());
System.out.println("***************************************************************");
if (output.getStdout() != null && output.getStdout().contains("WB_ReadFromNoaccessArea method is useless")) {
// Test conditions are not met: there is no protected page in ReservedHeapSpace under these circumstances, so pass silently.
return;
}
if (Platform.isWindows()) {
output.shouldContain("EXCEPTION_ACCESS_VIOLATION");
} else if (Platform.isOSX()) {
output.shouldContain("SIGBUS");
} else {
output.shouldContain("SIGSEGV");
}
}
public static class DummyClassWithMainTryingToReadFromNoaccessArea {
// This method calls the whitebox method that reads from the noaccess area
public static void main(String args[]) throws Exception {
WhiteBox.getWhiteBox().readFromNoaccessArea();
throw new Exception("Call of readFromNoaccessArea succeeded! This is wrong. Crash expected. Test failed.");
}
}
}

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test launches unit tests inside vm concurrently
* @library /testlibrary /testlibrary/whitebox
* @build RunUnitTestsConcurrently
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class RunUnitTestsConcurrently {
private static WhiteBox wb;
private static long timeout;
private static long timeStamp;
public static class Worker implements Runnable {
@Override
public void run() {
while (System.currentTimeMillis() - timeStamp < timeout) {
WhiteBox.getWhiteBox().runMemoryUnitTests();
}
}
}
public static void main(String[] args) throws InterruptedException {
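// the WhiteBox memory unit tests are exercised only on 64-bit debug
// builds; everywhere else the test passes trivially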
if (!Platform.isDebugBuild() || !Platform.is64bit()) {
return;
}
wb = WhiteBox.getWhiteBox();
System.out.println("Starting threads");
int threads = Integer.valueOf(args[0]);
timeout = Long.valueOf(args[1]);
timeStamp = System.currentTimeMillis();
Thread[] threadsArray = new Thread[threads];
for (int i = 0; i < threads; i++) {
threadsArray[i] = new Thread(new Worker());
threadsArray[i].start();
}
for (int i = 0; i < threads; i++) {
threadsArray[i].join();
}
System.out.println("Quitting test.");
}
}

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Stress test that expands/shrinks VirtualSpace
* @library /testlibrary /testlibrary/whitebox
* @build StressVirtualSpaceResize
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StressVirtualSpaceResize
*/
import sun.hotspot.WhiteBox;
public class StressVirtualSpaceResize {
public static void main(String args[]) throws Exception {
if (WhiteBox.getWhiteBox().stressVirtualSpaceResize(1000, 0xffffL, 0xffffL) != 0)
throw new RuntimeException("Whitebox method stressVirtualSpaceResize returned non zero exit code");
}
}

View File

@ -144,4 +144,10 @@ public class WhiteBox {
// force Full GC
public native void fullGC();
// Tests on ReservedSpace/VirtualSpace classes
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
public native void runMemoryUnitTests();
public native void readFromNoaccessArea();
}

View File

@ -237,3 +237,5 @@ c1f9158fbb9c2da50f6946fffd974e8236e08447 jdk8-b112
0046d2278204b7eff76803fc4623cb48c7e6384d jdk8-b113
1b1e12117fe2840e5d21ae9a4b309e4f981f3ea8 jdk8-b114
f610fd46463e6b0533dd92bce11a1e7d84984e64 jdk8-b115
e757eb9aee3d6bec7da074c47e07616104a8df33 jdk8-b116
c1d234d4f16472a5163464420fa00b25ffa5298a jdk8-b117

View File

@ -237,3 +237,5 @@ dbdd5c76250928582cb5342bcf7b299a6007d538 jdk8-b112
9261f342aa73a79bbd1a817ae72fa72b15ef30bc jdk8-b113
9ad289610fc6effe9076280b7920d0f16470709f jdk8-b114
e126d8eca69b83a1cc159c2375b7c33140346d2b jdk8-b115
587560c222a2476066852224ed02d39b5090a299 jdk8-b116
fe56ba456fd32758c72db629938d69067468d89c jdk8-b117

View File

@ -1,5 +1,6 @@
^build/
^dist/
^testoutput/
/nbproject/private/
^make/netbeans/.*/build/
^make/netbeans/.*/dist/

View File

@ -237,3 +237,5 @@ f002f5f3a16cca62e139cb8eed05ffaeb373587d jdk8-b112
5b4261b4b72af53e8e178933ef6bc6c7f8cdbc60 jdk8-b113
f26a0c8071bde1e3b923713c75156e4a58955623 jdk8-b114
f82b730c798b6bf38946baaba8a7d80fd5efaa70 jdk8-b115
0dc0067f3b8efb299a4c23f76ee26ea64df9e1d7 jdk8-b116
fc4ac66aa657e09de5f8257c3174f240ed0cbaf7 jdk8-b117

View File

@ -103,6 +103,7 @@ SUNWprivate_1.1 {
Java_sun_management_VMManagementImpl_getSafepointCount;
Java_sun_management_VMManagementImpl_getSafepointSyncTime;
Java_sun_management_VMManagementImpl_getStartupTime;
Java_sun_management_VMManagementImpl_getUptime0;
Java_sun_management_VMManagementImpl_getTotalApplicationNonStoppedTime;
Java_sun_management_VMManagementImpl_getTotalClassCount;
Java_sun_management_VMManagementImpl_getTotalCompileTime;

Some files were not shown because too many files have changed in this diff.