Merge
commit 48a315bb52
@ -29,6 +29,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

@ -80,7 +81,7 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_in
(JNIEnv *env, jclass cls) {
jclass listClass;

if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) {
if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) {
THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc");
}

@ -27,6 +27,8 @@
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
#include "libproc_impl.h"

@ -25,6 +25,7 @@
#include "salibelf.h"
#include <stdlib.h>
#include <unistd.h>
#include <string.h>

extern void print_debug(const char*,...);

@ -305,7 +305,7 @@ static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)

unsigned char *bytes
= (unsigned char*)(note+1) + note->n_namesz;
unsigned char *filename
char *filename
= (build_id_to_debug_filename (note->n_descsz, bytes));

fd = pathmap_open(filename);

@ -134,15 +134,13 @@ public class VM {
private String type;
private String name;
private Address addr;
private String kind;
private int origin;
private int flags;

private Flag(String type, String name, Address addr, String kind, int origin) {
private Flag(String type, String name, Address addr, int flags) {
this.type = type;
this.name = name;
this.addr = addr;
this.kind = kind;
this.origin = origin;
this.flags = flags;
}

public String getType() {

@ -157,12 +155,8 @@ public class VM {
return addr;
}

public String getKind() {
return kind;
}

public int getOrigin() {
return origin;
return flags & 0xF; // XXX can we get the mask bits from somewhere?
}

public boolean isBool() {

@ -173,8 +167,7 @@ public class VM {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isBool(), "not a bool flag!");
}
return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned())
!= 0;
return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
}

public boolean isIntx() {

@ -843,11 +836,10 @@ public class VM {

Address flagAddr = flagType.getAddressField("flags").getValue();

AddressField typeFld = flagType.getAddressField("type");
AddressField nameFld = flagType.getAddressField("name");
AddressField addrFld = flagType.getAddressField("addr");
AddressField kindFld = flagType.getAddressField("kind");
CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
AddressField typeFld = flagType.getAddressField("_type");
AddressField nameFld = flagType.getAddressField("_name");
AddressField addrFld = flagType.getAddressField("_addr");
CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0);

long flagSize = flagType.getSize(); // sizeof(Flag)

@ -856,9 +848,8 @@ public class VM {
String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
Address addr = addrFld.getValue(flagAddr);
String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
int origin = (int)originFld.getValue(flagAddr);
commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
int flags = (int)flagsFld.getValue(flagAddr);
commandLineFlags[f] = new Flag(type, name, addr, flags);
flagAddr = flagAddr.addOffsetTo(flagSize);
}
@ -247,7 +247,7 @@ endif

ifeq ($(USE_CLANG), true)
# However we need to clean the code up before we can unrestrictedly enable this option with Clang
WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
# Not yet supported by clang in Xcode 4.6.2
# WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare

@ -262,7 +262,7 @@ ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \&
# conversions which might affect the values. Only enable it in earlier versions.
WARNING_FLAGS = -Wunused-function
ifeq ($(USE_CLANG),)
WARNINGS_FLAGS += -Wconversion
WARNING_FLAGS += -Wconversion
endif
endif

@ -208,7 +208,7 @@ WARNINGS_ARE_ERRORS = -Werror

ifeq ($(USE_CLANG), true)
# However we need to clean the code up before we can unrestrictedly enable this option with Clang
WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -121,6 +121,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
bool for_compiler_entry) {
Label L_no_such_method;
assert(method == G5_method, "interpreter calling convention");
assert_different_registers(method, target, temp);

@ -133,6 +134,9 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
__ ld(interp_only, temp);
__ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
// Null method test is replicated below in compiled case,
// it might be able to address across the verify_thread()
__ br_null_short(G5_method, Assembler::pn, L_no_such_method);
__ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
__ jmp(target, 0);
__ delayed()->nop();

@ -141,11 +145,19 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
// it doesn't matter, since this is interpreter code.
}

// Compiled case, either static or fall-through from runtime conditional
__ br_null_short(G5_method, Assembler::pn, L_no_such_method);

const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ ld_ptr(G5_method, in_bytes(entry_offset), target);
__ jmp(target, 0);
__ delayed()->nop();

__ bind(L_no_such_method);
AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
__ jump_to(ame, temp);
__ delayed()->nop();
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,

@ -2018,6 +2018,15 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return L7_REGP_mask();
}

const RegMask Matcher::mathExactI_result_proj_mask() {
return G1_REGI_mask();
}

const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}

%}
@ -4245,12 +4254,16 @@ operand cmpOp() %{
greater_equal(0xB);
less_equal(0x2);
greater(0xA);
overflow(0x7);
no_overflow(0xF);
%}
%}

// Comparison Op, unsigned
operand cmpOpU() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);

format %{ "u" %}
interface(COND_INTER) %{

@ -4260,12 +4273,16 @@ operand cmpOpU() %{
greater_equal(0xD);
less_equal(0x4);
greater(0xC);
overflow(0x7);
no_overflow(0xF);
%}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);

format %{ "p" %}
interface(COND_INTER) %{

@ -4275,12 +4292,16 @@ operand cmpOpP() %{
greater_equal(0xD);
less_equal(0x4);
greater(0xC);
overflow(0x7);
no_overflow(0xF);
%}
%}

// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);

format %{ "" %}
interface(COND_INTER) %{

@ -4290,12 +4311,16 @@ operand cmpOp_reg() %{
greater_equal(0x7);
less_equal (0x2);
greater (0x6);
overflow(0x7); // not supported
no_overflow(0xF); // not supported
%}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);

format %{ "fl" %}
interface(COND_INTER) %{

@ -4305,12 +4330,17 @@ operand cmpOpF() %{
greater_equal(0xB);
less_equal(0xE);
greater(0x6);

overflow(0x7); // not supported
no_overflow(0xF); // not supported
%}
%}

// Used by long compare
operand cmpOp_commute() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);

format %{ "" %}
interface(COND_INTER) %{

@ -4320,6 +4350,8 @@ operand cmpOp_commute() %{
greater_equal(0x2);
less_equal(0xB);
greater(0x3);
overflow(0x7);
no_overflow(0xF);
%}
%}
@ -4769,7 +4769,7 @@ void Assembler::adcq(Register dst, Address src) {
}

void Assembler::adcq(Register dst, Register src) {
(int) prefixq_and_encode(dst->encoding(), src->encoding());
(void) prefixq_and_encode(dst->encoding(), src->encoding());
emit_arith(0x13, 0xC0, dst, src);
}

@ -4824,7 +4824,7 @@ void Assembler::andq(Register dst, Address src) {
}

void Assembler::andq(Register dst, Register src) {
(int) prefixq_and_encode(dst->encoding(), src->encoding());
(void) prefixq_and_encode(dst->encoding(), src->encoding());
emit_arith(0x23, 0xC0, dst, src);
}

@ -114,6 +114,11 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry) {
assert(method == rbx, "interpreter calling convention");

Label L_no_such_method;
__ testptr(rbx, rbx);
__ jcc(Assembler::zero, L_no_such_method);

__ verify_method_ptr(method);

if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {

@ -138,6 +143,9 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ jmp(Address(method, entry_offset));

__ bind(L_no_such_method);
__ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -1534,6 +1534,14 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask();
}

const RegMask Matcher::mathExactI_result_proj_mask() {
return EAX_REG_mask();
}

const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}

// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();

@ -4922,6 +4930,8 @@ operand cmpOp() %{
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4939,6 +4949,8 @@ operand cmpOpU() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4957,6 +4969,8 @@ operand cmpOpUCF() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4974,6 +4988,8 @@ operand cmpOpUCF2() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4981,6 +4997,8 @@ operand cmpOpUCF2() %{
operand cmpOp_fcmov() %{
match(Bool);

predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "" %}
interface(COND_INTER) %{
equal (0x0C8);

@ -4989,6 +5007,8 @@ operand cmpOp_fcmov() %{
greater_equal(0x1C0);
less_equal (0x0D0);
greater (0x1D0);
overflow(0x0, "o"); // not really supported by the instruction
no_overflow(0x1, "no"); // not really supported by the instruction
%}
%}

@ -5004,6 +5024,8 @@ operand cmpOp_commute() %{
greater_equal(0xE, "le");
less_equal(0xD, "ge");
greater(0xC, "l");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -7496,6 +7518,31 @@ instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{

//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------

instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);

format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}

instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);

format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}

// Integer Addition Instructions
instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
match(Set dst (AddI dst src));
@ -1649,6 +1649,14 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask();
}

const RegMask Matcher::mathExactI_result_proj_mask() {
return INT_RAX_REG_mask();
}

const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}

%}

//----------ENCODING BLOCK-----------------------------------------------------

@ -4133,6 +4141,8 @@ operand cmpOp()
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4151,6 +4161,8 @@ operand cmpOpU()
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4170,6 +4182,8 @@ operand cmpOpUCF() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -4187,6 +4201,8 @@ operand cmpOpUCF2() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}

@ -6922,6 +6938,30 @@ instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------

instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);

format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}

instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);

format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}

instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
match(Set dst (AddI dst src));
@ -3395,12 +3395,16 @@ Interface *ADLParser::cond_interface_parse(void) {
char *greater_equal;
char *less_equal;
char *greater;
char *overflow;
char *no_overflow;
const char *equal_format = "eq";
const char *not_equal_format = "ne";
const char *less_format = "lt";
const char *greater_equal_format = "ge";
const char *less_equal_format = "le";
const char *greater_format = "gt";
const char *overflow_format = "o";
const char *no_overflow_format = "no";

if (_curchar != '%') {
parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");

@ -3437,6 +3441,12 @@ Interface *ADLParser::cond_interface_parse(void) {
else if ( strcmp(field,"greater") == 0 ) {
greater = interface_field_parse(&greater_format);
}
else if ( strcmp(field,"overflow") == 0 ) {
overflow = interface_field_parse(&overflow_format);
}
else if ( strcmp(field,"no_overflow") == 0 ) {
no_overflow = interface_field_parse(&no_overflow_format);
}
else {
parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n");
return NULL;

@ -3455,7 +3465,9 @@ Interface *ADLParser::cond_interface_parse(void) {
less, less_format,
greater_equal, greater_equal_format,
less_equal, less_equal_format,
greater, greater_format);
greater, greater_format,
overflow, overflow_format,
no_overflow, no_overflow_format);
return inter;
}

@ -1192,6 +1192,8 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
|| strcmp(idealName,"CmpF") == 0
|| strcmp(idealName,"FastLock") == 0
|| strcmp(idealName,"FastUnlock") == 0
|| strcmp(idealName,"AddExactI") == 0
|| strcmp(idealName,"FlagsProj") == 0
|| strcmp(idealName,"Bool") == 0
|| strcmp(idealName,"Binary") == 0 ) {
// Removed ConI from the must_clone list. CPUs that cannot use

@ -2757,14 +2757,18 @@ CondInterface::CondInterface(const char* equal, const char* equal_format
const char* less, const char* less_format,
const char* greater_equal, const char* greater_equal_format,
const char* less_equal, const char* less_equal_format,
const char* greater, const char* greater_format)
const char* greater, const char* greater_format,
const char* overflow, const char* overflow_format,
const char* no_overflow, const char* no_overflow_format)
: Interface("COND_INTER"),
_equal(equal), _equal_format(equal_format),
_not_equal(not_equal), _not_equal_format(not_equal_format),
_less(less), _less_format(less_format),
_greater_equal(greater_equal), _greater_equal_format(greater_equal_format),
_less_equal(less_equal), _less_equal_format(less_equal_format),
_greater(greater), _greater_format(greater_format) {
_greater(greater), _greater_format(greater_format),
_overflow(overflow), _overflow_format(overflow_format),
_no_overflow(no_overflow), _no_overflow_format(no_overflow_format) {
}
CondInterface::~CondInterface() {
// not owner of any character arrays

@ -2777,12 +2781,14 @@ void CondInterface::dump() {
// Write info to output files
void CondInterface::output(FILE *fp) {
Interface::output(fp);
if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal);
if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal);
if ( _less != NULL ) fprintf(fp," less == %s\n", _less);
if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal);
if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal);
if ( _greater != NULL ) fprintf(fp," greater == %s\n", _greater);
if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal);
if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal);
if ( _less != NULL ) fprintf(fp," less == %s\n", _less);
if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal);
if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal);
if ( _greater != NULL ) fprintf(fp," greater == %s\n", _greater);
if ( _overflow != NULL ) fprintf(fp," overflow == %s\n", _overflow);
if ( _no_overflow != NULL ) fprintf(fp," no_overflow == %s\n", _no_overflow);
// fprintf(fp,"\n");
}

@ -798,12 +798,16 @@ public:
const char *_greater_equal;
const char *_less_equal;
const char *_greater;
const char *_overflow;
const char *_no_overflow;
const char *_equal_format;
const char *_not_equal_format;
const char *_less_format;
const char *_greater_equal_format;
const char *_less_equal_format;
const char *_greater_format;
const char *_overflow_format;
const char *_no_overflow_format;

// Public Methods
CondInterface(const char* equal, const char* equal_format,

@ -811,7 +815,9 @@ public:
const char* less, const char* less_format,
const char* greater_equal, const char* greater_equal_format,
const char* less_equal, const char* less_equal_format,
const char* greater, const char* greater_format);
const char* greater, const char* greater_format,
const char* overflow, const char* overflow_format,
const char* no_overflow, const char* no_overflow_format);
~CondInterface();

void dump();

@ -388,6 +388,8 @@ static void defineCCodeDump(OperandForm* oper, FILE *fp, int i) {
fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
fprintf(fp, " else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format);
fprintf(fp, " else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format);
}

// Output code that dumps constant values, increment "i" if type is constant

@ -1208,6 +1210,8 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," case BoolTest::ne : return not_equal();\n");
fprintf(fp," case BoolTest::le : return less_equal();\n");
fprintf(fp," case BoolTest::ge : return greater_equal();\n");
fprintf(fp," case BoolTest::overflow : return overflow();\n");
fprintf(fp," case BoolTest::no_overflow: return no_overflow();\n");
fprintf(fp," default : ShouldNotReachHere(); return 0;\n");
fprintf(fp," }\n");
fprintf(fp," };\n");

@ -1373,6 +1377,14 @@ void ArchDesc::declareClasses(FILE *fp) {
if( greater != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "greater", greater);
}
const char *overflow = cInterface->_overflow;
if( overflow != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "overflow", overflow);
}
const char *no_overflow = cInterface->_no_overflow;
if( no_overflow != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow);
}
} // end Conditional Interface
// Check if it is a Constant Interface
else if (oper->_interface->is_ConstInterface() != NULL ) {
@ -1078,14 +1078,17 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// replace instructions
// first replace the tail, then the call
#ifdef ARM
if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
if((load_klass_or_mirror_patch_id ||
stub_id == Runtime1::load_appendix_patching_id) &&
!VM_Version::supports_movw()) {
nmethod* nm = CodeCache::find_nmethod(instr_pc);
address addr = NULL;
assert(nm != NULL, "invalid nmethod_pc");
RelocIterator mds(nm, copy_buff, copy_buff + 1);
while (mds.next()) {
if (mds.type() == relocInfo::oop_type) {
assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
assert(stub_id == Runtime1::load_mirror_patching_id ||
stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
oop_Relocation* r = mds.oop_reloc();
addr = (address)r->oop_addr();
break;

@ -1787,7 +1787,7 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Hidden;
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature):
if (_location != _in_field) break; // only allow for fields
if (!privileged) break; // only allow in privileged code
return _field_Stable;

@ -793,7 +793,7 @@ void DefaultMethods::generate_default_methods(

#ifndef PRODUCT
#ifdef ASSERT
// Return true is broad type is a covariant return of narrow type
static bool covariant_return_type(BasicType narrow, BasicType broad) {
if (narrow == broad) {

@ -804,7 +804,7 @@ static bool covariant_return_type(BasicType narrow, BasicType broad) {
}
return false;
}
#endif // ndef PRODUCT
#endif

static int assemble_redirect(
BytecodeConstantPool* cp, BytecodeBuffer* buffer,

@ -1095,4 +1095,3 @@ static void merge_in_new_methods(InstanceKlass* klass,
MetadataFactory::free_array(cld, original_ordering);
}
}

@ -270,7 +270,7 @@
template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \
template(sun_invoke_Stable_signature, "Lsun/invoke/Stable;") \
template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \
template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \

@ -631,6 +631,10 @@
do_name(log_name,"log") do_name(log10_name,"log10") do_name(pow_name,"pow") \
do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \
\
do_name(addExact_name,"addExact") \
do_name(subtractExact_name,"subtractExact") \
do_name(multiplyExact_name,"multiplyExact") \
\
do_intrinsic(_dabs, java_lang_Math, abs_name, double_double_signature, F_S) \
do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \
do_intrinsic(_dcos, java_lang_Math, cos_name, double_double_signature, F_S) \

@ -643,6 +647,7 @@
do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \
do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \
do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \
do_intrinsic(_addExact, java_lang_Math, addExact_name, int2_int_signature, F_S) \
\
do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \
do_name( floatToRawIntBits_name, "floatToRawIntBits") \
@ -124,7 +124,6 @@ int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

@ -464,96 +463,11 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT

/**
* Remove and return nmethod from the saved code list in order to reanimate it.
*/
nmethod* CodeCache::reanimate_saved_code(Method* m) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved->is_in_use() && saved->method() == m) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
saved->set_speculatively_disconnected(false);
saved->set_saved_nmethod_link(NULL);
if (PrintMethodFlushing) {
saved->print_on(tty, " ### nmethod is reconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
xtty->method(m);
xtty->stamp();
xtty->end_elem();
}
return saved;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
return NULL;
}

/**
* Remove nmethod from the saved code list in order to discard it permanently
*/
void CodeCache::remove_saved_code(nmethod* nm) {
// For conc swpr this will be called with CodeCache_lock taken by caller
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved == nm) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
xtty->stamp();
xtty->end_elem();
}
return;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
nm->set_saved_nmethod_link(_saved_nmethods);
_saved_nmethods = nm;
if (PrintMethodFlushing) {
nm->print_on(tty, " ### nmethod is speculatively disconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
xtty->method(nm->method());
xtty->stamp();
xtty->end_elem();
}
nm->method()->clear_code();
nm->set_speculatively_disconnected(true);
}

void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) {

@ -57,7 +57,6 @@ class CodeCache : AllStatic {
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods.

static void verify_if_often() PRODUCT_RETURN;

@ -167,17 +166,12 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static double reverse_free_ratio();

static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches

static nmethod* reanimate_saved_code(Method* m);
static void remove_saved_code(nmethod* nm);
static void speculatively_disconnect(nmethod* nm);

// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
@ -462,7 +462,6 @@ void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;

@ -481,7 +480,6 @@ void nmethod::init_defaults() {
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;

#ifdef HAVE_DTRACE_H

@ -686,6 +684,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();

code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {

@ -770,6 +769,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();

code_buffer->copy_values_to(this);
debug_only(verify_scavenge_root_oops());

@ -842,6 +842,7 @@ nmethod::nmethod(
_comp_level = comp_level;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();

// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());

@ -1176,7 +1177,7 @@ void nmethod::cleanup_inline_caches() {

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method");
assert(is_alive(), "Must be an alive method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count());

@ -1261,7 +1262,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::notify(this);
NMethodSweeper::notify();
}

void nmethod::invalidate_osr_method() {

@ -1351,6 +1352,15 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
nmethod_needs_unregister = true;
}

// Must happen before state change. Otherwise we have a race condition in
// nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
// transition its state from 'not_entrant' to 'zombie' without having to wait
// for stack scanning.
if (state == not_entrant) {
mark_as_seen_on_stack();
OrderAccess::storestore();
}

// Change state
_state = state;

@ -1369,11 +1379,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
HandleMark hm;
method()->clear_code();
}

if (state == not_entrant) {
mark_as_seen_on_stack();
}

} // leave critical region under Patching_lock

// When the nmethod becomes zombie it is no longer alive so the

@ -1416,7 +1421,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
}

// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this);
NMethodSweeper::notify();

return true;
}

@ -1451,10 +1456,6 @@ void nmethod::flush() {
CodeCache::drop_scavenge_root_nmethod(this);
}

if (is_speculatively_disconnected()) {
CodeCache::remove_saved_code(this);
}

#ifdef SHARK
((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

@ -119,7 +119,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect

static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;

@ -165,7 +164,6 @@ class nmethod : public CodeBlob {

// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
bool _speculatively_disconnected; // Marked for potential unload

bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization

@ -180,7 +178,7 @@ class nmethod : public CodeBlob {
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints

// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}

#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section

@ -202,11 +200,18 @@ class nmethod : public CodeBlob {

// not_entrant method removal. Each mark_sweep pass will update
// this mark to current sweep invocation count if it is seen on the
// stack. An not_entrant method can be removed when there is no
// stack. An not_entrant method can be removed when there are no
// more activations, i.e., when the _stack_traversal_mark is less than
// current sweep traversal index.
long _stack_traversal_mark;

// The _hotness_counter indicates the hotness of a method. The higher
// the value the hotter the method. The hotness counter of a nmethod is
// set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
// is active while stack scanning (mark_active_nmethods()). The hotness
// counter is decreased (by 1) while sweeping.
int _hotness_counter;

ExceptionCache *_exception_cache;
PcDescCache _pc_desc_cache;

@ -382,6 +387,10 @@ class nmethod : public CodeBlob {

int total_size () const;

void dec_hotness_counter() { _hotness_counter--; }
void set_hotness_counter(int val) { _hotness_counter = val; }
int hotness_counter() const { return _hotness_counter; }

// Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }

@ -408,8 +417,8 @@ class nmethod : public CodeBlob {
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }

// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }

@ -437,9 +446,6 @@ class nmethod : public CodeBlob {
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }

bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }

@ -499,9 +505,6 @@ public:
nmethod* scavenge_root_link() const { return _scavenge_root_link; }
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }

nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }

public:

// Sweeper support
@ -634,19 +634,36 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();

MutexLocker locker(lock());
// Wait for an available CompileTask.
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter
// case we perform code cache sweeps to free memory such that we can re-enable
// compilation.
while (_first == NULL) {
// There is no work to be done right now. Wait.
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
// During the emergency sweeping periods, wake up and sweep occasionally
bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
if (timedout) {
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
// transition a method's state from 'not_entrant' to 'zombie'.
long wait_time = NmethodSweepCheckInterval * 1000;
if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
// Only one thread at a time can do sweeping. Scale the
// wait time according to the number of compiler threads.
// As a result, the next sweep is likely to happen every 100ms
// with an arbitrary number of threads that do sweeping.
wait_time = 100 * CICompilerCount;
}
bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
if (timeout) {
MutexUnlocker ul(lock());
// When otherwise not busy, run nmethod sweeping
NMethodSweeper::possibly_sweep();
}
} else {
// During normal operation no need to wake up on timer
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
lock()->wait();
}
}

@ -1227,16 +1244,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code;
}
}
if (method->is_not_compilable(comp_level)) return NULL;

if (UseCodeCacheFlushing) {
nmethod* saved = CodeCache::reanimate_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
}
if (method->is_not_compilable(comp_level)) {
return NULL;
}

} else {
// osr compilation
#ifndef TIERED

@ -1585,9 +1595,6 @@ void CompileBroker::compiler_thread_loop() {
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
} else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
// Attempt to start cleaning the code cache while there is still a little headroom
NMethodSweeper::handle_full_code_cache(false);
}

CompileTask* task = queue->get();

@ -1943,7 +1950,11 @@ void CompileBroker::handle_full_code_cache() {
}
#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
NMethodSweeper::possibly_sweep();
}
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
@ -81,7 +81,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
G1CollectedHeap* g1h = _g1h;
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
CardTableModRefBS* ct_bs = g1h->g1_barrier_set();

HeapWord* start = mr.start();
HeapWord* end = mr.end();

@ -65,9 +65,7 @@ void G1CardCounts::initialize() {
// threshold limit is no more than this.
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");

ModRefBarrierSet* bs = _g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
_ct_bs = (CardTableModRefBS*)bs;
_ct_bs = _g1h->g1_barrier_set();
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

// Allocate/Reserve the counts table

@ -125,10 +125,8 @@ class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
int _histo[256];
public:
ClearLoggedCardTableEntryClosure() :
_calls(0)
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
{
_g1h = G1CollectedHeap::heap();
_ctbs = (CardTableModRefBS*)_g1h->barrier_set();
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
bool do_card_ptr(jbyte* card_ptr, int worker_i) {

@ -158,11 +156,8 @@ class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
CardTableModRefBS* _ctbs;
public:
RedirtyLoggedCardTableEntryClosure() :
_calls(0)
{
_g1h = G1CollectedHeap::heap();
_ctbs = (CardTableModRefBS*)_g1h->barrier_set();
}
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}

bool do_card_ptr(jbyte* card_ptr, int worker_i) {
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
_calls++;

@ -478,7 +473,7 @@ bool G1CollectedHeap::is_scavengable(const void* p) {

void G1CollectedHeap::check_ct_logs_at_safepoint() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
CardTableModRefBS* ct_bs = g1_barrier_set();

// Count the dirty cards at the start.
CountNonCleanMemRegionClosure count1(this);

@ -1205,7 +1200,7 @@ public:
};

void G1CollectedHeap::clear_rsets_post_compaction() {
PostMCRemSetClearClosure rs_clear(this, mr_bs());
PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
heap_region_iterate(&rs_clear);
}

@ -1777,7 +1772,6 @@ void G1CollectedHeap::update_committed_space(HeapWord* old_end,
}

bool G1CollectedHeap::expand(size_t expand_bytes) {
size_t old_mem_size = _g1_storage.committed_size();
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_size_up(aligned_expand_bytes,
HeapRegion::GrainBytes);

@ -1787,6 +1781,13 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
ergo_format_byte("attempted expansion amount"),
expand_bytes, aligned_expand_bytes);

if (_g1_storage.uncommitted_size() == 0) {
ergo_verbose0(ErgoHeapSizing,
"did not expand the heap",
ergo_format_reason("heap already fully expanded"));
return false;
}

// First commit the memory.
HeapWord* old_end = (HeapWord*) _g1_storage.high();
bool successful = _g1_storage.expand_by(aligned_expand_bytes);

@ -1845,7 +1846,6 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t old_mem_size = _g1_storage.committed_size();
size_t aligned_shrink_bytes =
ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,

@ -2045,20 +2045,13 @@ jint G1CollectedHeap::initialize() {
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(_reserved, 2);
set_barrier_set(rem_set()->bs());
if (barrier_set()->is_a(BarrierSet::ModRef)) {
_mr_bs = (ModRefBarrierSet*)_barrier_set;
} else {
vm_exit_during_initialization("G1 requires a mod ref bs.");
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
return JNI_ENOMEM;
}

// Also create a G1 rem set.
if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
_g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
} else {
vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
return JNI_ENOMEM;
}
_g1_rem_set = new G1RemSet(this, g1_barrier_set());

// Carve out the G1 part of the heap.
@ -3681,6 +3674,11 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// Fill TLAB's and such
ensure_parsability(true);

if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
}
}

void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {

@ -3689,7 +3687,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
(G1SummarizeRSetStatsPeriod > 0) &&
// we are at the end of the GC. Total collections has already been increased.
((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_periodic_summary_info();
g1_rem_set()->print_periodic_summary_info("After GC RS summary");
}

// FIXME: what is this about?

@ -4550,7 +4548,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
: _g1h(g1h),
_refs(g1h->task_queue(queue_num)),
_dcq(&g1h->dirty_card_queue_set()),
_ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
_ct_bs(g1h->g1_barrier_set()),
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),

@ -5979,11 +5977,11 @@ void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
}

class G1ParCleanupCTTask : public AbstractGangTask {
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;
HeapRegion* volatile _su_head;
public:
G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
G1CollectedHeap* g1h) :
AbstractGangTask("G1 Par Cleanup CT Task"),
_ct_bs(ct_bs), _g1h(g1h) { }

@ -6006,9 +6004,9 @@ public:
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
: _g1h(g1h), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) {
if (r->is_survivor()) {

@ -6022,7 +6020,7 @@ public:

void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}

@ -6035,13 +6033,13 @@ void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
ct_bs->verify_dirty_region(mr);
}

void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
verify_dirty_region(hr);
}

@ -6053,7 +6051,7 @@ void G1CollectedHeap::verify_dirty_young_regions() {
#endif

void G1CollectedHeap::cleanUpCardTable() {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
double start = os::elapsedTime();

{
@ -31,6 +31,7 @@
|
||||
#include "gc_implementation/g1/g1HRPrinter.hpp"
|
||||
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/g1YCTypes.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSets.hpp"
|
||||
@ -791,8 +792,6 @@ protected:
|
||||
|
||||
// The g1 remembered set of the heap.
|
||||
G1RemSet* _g1_rem_set;
|
||||
// And it's mod ref barrier set, used to track updates for the above.
|
||||
ModRefBarrierSet* _mr_bs;
|
||||
|
||||
// A set of cards that cover the objects for which the Rsets should be updated
|
||||
// concurrently after the collection.
|
||||
@ -1127,7 +1126,6 @@ public:
|
||||
|
||||
// The rem set and barrier set.
|
||||
G1RemSet* g1_rem_set() const { return _g1_rem_set; }
|
||||
ModRefBarrierSet* mr_bs() const { return _mr_bs; }
|
||||
|
||||
unsigned get_gc_time_stamp() {
|
||||
return _gc_time_stamp;
|
||||
@ -1346,6 +1344,10 @@ public:
|
||||
|
||||
virtual bool is_in_closed_subset(const void* p) const;
|
||||
|
||||
G1SATBCardTableModRefBS* g1_barrier_set() {
|
||||
return (G1SATBCardTableModRefBS*) barrier_set();
|
||||
}
|
||||
|
||||
// This resets the card table to all zeros. It is used after
|
||||
// a collection pause which used the card table to claim cards.
|
||||
void cleanUpCardTable();
|
||||
@ -1875,7 +1877,7 @@ protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
RefToScanQueue* _refs;
|
||||
DirtyCardQueue _dcq;
|
||||
CardTableModRefBS* _ct_bs;
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
G1RemSet* _g1_rem;
|
||||
|
||||
G1ParGCAllocBufferContainer _surviving_alloc_buffer;
|
||||
@ -1914,7 +1916,7 @@ protected:
|
||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||
|
||||
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
||||
CardTableModRefBS* ctbs() { return _ct_bs; }
|
||||
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
|
||||
|
||||
template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
|
||||
if (!from->is_survivor()) {
|
||||
|
@ -134,7 +134,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
|
||||
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
|
||||
|
||||
MemRegion mr(start, end);
|
||||
((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
|
||||
g1_barrier_set()->dirty(mr);
|
||||
}
|
||||
|
||||
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
|
||||
|
@ -41,11 +41,11 @@ class UpdateRSetDeferred : public OopsInHeapRegionClosure {
|
||||
private:
|
||||
G1CollectedHeap* _g1;
|
||||
DirtyCardQueue *_dcq;
|
||||
CardTableModRefBS* _ct_bs;
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
|
||||
public:
|
||||
UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
|
||||
_g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
|
||||
_g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
|
||||
|
||||
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
virtual void do_oop( oop* p) { do_oop_work(p); }
|
||||
|
@ -220,7 +220,7 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
|
||||
public:
|
||||
G1PrepareCompactClosure(CompactibleSpace* cs)
|
||||
: _g1h(G1CollectedHeap::heap()),
|
||||
_mrbs(G1CollectedHeap::heap()->mr_bs()),
|
||||
_mrbs(_g1h->g1_barrier_set()),
|
||||
_cp(NULL, cs, cs->initialize_threshold()),
|
||||
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
|
||||
|
||||
|
@ -83,7 +83,9 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
|
||||
for (uint i = 0; i < n_workers(); i++) {
|
||||
_cset_rs_update_cl[i] = NULL;
|
||||
}
|
||||
_prev_period_summary.initialize(this, n_workers());
|
||||
if (G1SummarizeRSetStats) {
|
||||
_prev_period_summary.initialize(this);
|
||||
}
|
||||
}
|
||||
|
||||
G1RemSet::~G1RemSet() {
|
||||
@ -109,7 +111,7 @@ class ScanRSClosure : public HeapRegionClosure {
|
||||
CodeBlobToOopClosure* _code_root_cl;
|
||||
|
||||
G1BlockOffsetSharedArray* _bot_shared;
|
||||
CardTableModRefBS *_ct_bs;
|
||||
G1SATBCardTableModRefBS *_ct_bs;
|
||||
|
||||
double _strong_code_root_scan_time_sec;
|
||||
int _worker_i;
|
||||
@ -130,7 +132,7 @@ public:
|
||||
{
|
||||
_g1h = G1CollectedHeap::heap();
|
||||
_bot_shared = _g1h->bot_shared();
|
||||
_ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
|
||||
_ct_bs = _g1h->g1_barrier_set();
|
||||
_block_size = MAX2<int>(G1RSetScanBlockSize, 1);
|
||||
}
|
||||
|
||||
@ -505,12 +507,7 @@ public:
|
||||
ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
|
||||
_g1h(G1CollectedHeap::heap()),
|
||||
_region_bm(region_bm), _card_bm(card_bm),
|
||||
_ctbs(NULL)
|
||||
{
|
||||
ModRefBarrierSet* bs = _g1h->mr_bs();
|
||||
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
|
||||
_ctbs = (CardTableModRefBS*)bs;
|
||||
}
|
||||
_ctbs(_g1h->g1_barrier_set()) {}
|
||||
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
@ -731,19 +728,19 @@ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
|
||||
return has_refs_into_cset;
|
||||
}
|
||||
|
||||
void G1RemSet::print_periodic_summary_info() {
|
||||
void G1RemSet::print_periodic_summary_info(const char* header) {
|
||||
G1RemSetSummary current;
|
||||
current.initialize(this, n_workers());
|
||||
current.initialize(this);
|
||||
|
||||
_prev_period_summary.subtract_from(¤t);
|
||||
print_summary_info(&_prev_period_summary);
|
||||
print_summary_info(&_prev_period_summary, header);
|
||||
|
||||
_prev_period_summary.set(¤t);
|
||||
}
|
||||
|
||||
void G1RemSet::print_summary_info() {
|
||||
G1RemSetSummary current;
|
||||
current.initialize(this, n_workers());
|
||||
current.initialize(this);
|
||||
|
||||
print_summary_info(¤t, " Cumulative RS summary");
|
||||
}
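The two functions above follow a snapshot-and-delta pattern: print_periodic_summary_info() fills a fresh G1RemSetSummary, turns the stored previous-period summary into the difference via subtract_from(), prints it under the supplied header, and then keeps the current snapshot as the new baseline. Below is a minimal stand-alone sketch of that pattern; RemSetSnapshot and PeriodicPrinter are made-up names, not HotSpot classes.

#include <cstddef>
#include <cstdio>

struct RemSetSnapshot {
  size_t refined_cards;
  size_t coarsenings;

  // "this = other - this", mirroring the subtract_from() semantics shown above.
  void subtract_from(const RemSetSnapshot* other) {
    refined_cards = other->refined_cards - refined_cards;
    coarsenings   = other->coarsenings   - coarsenings;
  }
};

struct PeriodicPrinter {
  RemSetSnapshot _prev{};           // baseline from the previous period

  void print_periodic(const RemSetSnapshot& current, const char* header) {
    RemSetSnapshot delta = _prev;   // copy the old baseline
    delta.subtract_from(&current);  // delta now holds current - prev
    std::printf("%s: %zu cards refined, %zu coarsenings this period\n",
                header, delta.refined_cards, delta.coarsenings);
    _prev = current;                // current becomes the next baseline
  }
};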
|
||||
|
@ -145,7 +145,7 @@ public:
|
||||
virtual void print_summary_info();
|
||||
|
||||
// Print accumulated summary info from the last time called.
|
||||
virtual void print_periodic_summary_info();
|
||||
virtual void print_periodic_summary_info(const char* header);
|
||||
|
||||
// Prepare remembered set for verification.
|
||||
virtual void prepare_for_verify();
|
||||
|
@ -77,12 +77,12 @@ double G1RemSetSummary::rs_thread_vtime(uint thread) const {
|
||||
return _rs_threads_vtimes[thread];
|
||||
}
|
||||
|
||||
void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
|
||||
void G1RemSetSummary::initialize(G1RemSet* remset) {
|
||||
assert(_rs_threads_vtimes == NULL, "just checking");
|
||||
assert(remset != NULL, "just checking");
|
||||
|
||||
_remset = remset;
|
||||
_num_vtimes = num_workers;
|
||||
_num_vtimes = ConcurrentG1Refine::thread_num();
|
||||
_rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
|
||||
memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
|
||||
|
||||
@ -125,25 +125,115 @@ void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
|
||||
_sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
|
||||
}
|
||||
|
||||
class HRRSStatsIter: public HeapRegionClosure {
|
||||
size_t _occupied;
|
||||
static double percent_of(size_t numerator, size_t denominator) {
|
||||
if (denominator != 0) {
|
||||
return (double)numerator / denominator * 100.0f;
|
||||
} else {
|
||||
return 0.0f;
|
||||
}
|
||||
}
|
||||
|
||||
static size_t round_to_K(size_t value) {
|
||||
return value / K;
|
||||
}
|
||||
|
||||
class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
const char* _name;
|
||||
|
||||
size_t _rs_mem_size;
|
||||
size_t _cards_occupied;
|
||||
size_t _amount;
|
||||
|
||||
size_t _code_root_mem_size;
|
||||
size_t _code_root_elems;
|
||||
|
||||
double rs_mem_size_percent_of(size_t total) {
|
||||
return percent_of(_rs_mem_size, total);
|
||||
}
|
||||
|
||||
double cards_occupied_percent_of(size_t total) {
|
||||
return percent_of(_cards_occupied, total);
|
||||
}
|
||||
|
||||
double code_root_mem_size_percent_of(size_t total) {
|
||||
return percent_of(_code_root_mem_size, total);
|
||||
}
|
||||
|
||||
double code_root_elems_percent_of(size_t total) {
|
||||
return percent_of(_code_root_elems, total);
|
||||
}
|
||||
|
||||
size_t amount() const { return _amount; }
|
||||
|
||||
public:
|
||||
|
||||
RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0),
|
||||
_amount(0), _code_root_mem_size(0), _code_root_elems(0) { }
|
||||
|
||||
void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size,
|
||||
size_t code_root_elems) {
|
||||
_rs_mem_size += rs_mem_size;
|
||||
_cards_occupied += cards_occupied;
|
||||
_code_root_mem_size += code_root_mem_size;
|
||||
_code_root_elems += code_root_elems;
|
||||
_amount++;
|
||||
}
|
||||
|
||||
size_t rs_mem_size() const { return _rs_mem_size; }
|
||||
size_t cards_occupied() const { return _cards_occupied; }
|
||||
|
||||
size_t code_root_mem_size() const { return _code_root_mem_size; }
|
||||
size_t code_root_elems() const { return _code_root_elems; }
|
||||
|
||||
void print_rs_mem_info_on(outputStream * out, size_t total) {
|
||||
out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
|
||||
}
|
||||
|
||||
void print_cards_occupied_info_on(outputStream * out, size_t total) {
|
||||
out->print_cr(" %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
|
||||
}
|
||||
|
||||
void print_code_root_mem_info_on(outputStream * out, size_t total) {
|
||||
out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
|
||||
}
|
||||
|
||||
void print_code_root_elems_info_on(outputStream * out, size_t total) {
|
||||
out->print_cr(" %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class HRRSStatsIter: public HeapRegionClosure {
|
||||
private:
|
||||
RegionTypeCounter _young;
|
||||
RegionTypeCounter _humonguous;
|
||||
RegionTypeCounter _free;
|
||||
RegionTypeCounter _old;
|
||||
RegionTypeCounter _all;
|
||||
|
||||
size_t _total_rs_mem_sz;
|
||||
size_t _max_rs_mem_sz;
|
||||
HeapRegion* _max_rs_mem_sz_region;
|
||||
|
||||
size_t _total_code_root_mem_sz;
|
||||
size_t total_rs_mem_sz() const { return _all.rs_mem_size(); }
|
||||
size_t total_cards_occupied() const { return _all.cards_occupied(); }
|
||||
|
||||
size_t max_rs_mem_sz() const { return _max_rs_mem_sz; }
|
||||
HeapRegion* max_rs_mem_sz_region() const { return _max_rs_mem_sz_region; }
|
||||
|
||||
size_t _max_code_root_mem_sz;
|
||||
HeapRegion* _max_code_root_mem_sz_region;
|
||||
|
||||
size_t total_code_root_mem_sz() const { return _all.code_root_mem_size(); }
|
||||
size_t total_code_root_elems() const { return _all.code_root_elems(); }
|
||||
|
||||
size_t max_code_root_mem_sz() const { return _max_code_root_mem_sz; }
|
||||
HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
|
||||
|
||||
public:
|
||||
HRRSStatsIter() :
|
||||
_occupied(0),
|
||||
_total_rs_mem_sz(0),
|
||||
_max_rs_mem_sz(0),
|
||||
_max_rs_mem_sz_region(NULL),
|
||||
_total_code_root_mem_sz(0),
|
||||
_max_code_root_mem_sz(0),
|
||||
_max_code_root_mem_sz_region(NULL)
|
||||
HRRSStatsIter() : _all("All"), _young("Young"), _humonguous("Humongous"),
|
||||
_free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
|
||||
_max_rs_mem_sz(0), _max_code_root_mem_sz(0)
|
||||
{}
|
||||
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
@ -156,46 +246,95 @@ public:
|
||||
_max_rs_mem_sz = rs_mem_sz;
|
||||
_max_rs_mem_sz_region = r;
|
||||
}
|
||||
_total_rs_mem_sz += rs_mem_sz;
|
||||
|
||||
size_t occupied_cards = hrrs->occupied();
|
||||
size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
|
||||
if (code_root_mem_sz > _max_code_root_mem_sz) {
|
||||
_max_code_root_mem_sz = code_root_mem_sz;
|
||||
if (code_root_mem_sz > max_code_root_mem_sz()) {
|
||||
_max_code_root_mem_sz_region = r;
|
||||
}
|
||||
_total_code_root_mem_sz += code_root_mem_sz;
|
||||
size_t code_root_elems = hrrs->strong_code_roots_list_length();
|
||||
|
||||
RegionTypeCounter* current = NULL;
|
||||
if (r->is_young()) {
|
||||
current = &_young;
|
||||
} else if (r->isHumongous()) {
|
||||
current = &_humonguous;
|
||||
} else if (r->is_empty()) {
|
||||
current = &_free;
|
||||
} else {
|
||||
current = &_old;
|
||||
}
|
||||
current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
|
||||
_all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
|
||||
|
||||
size_t occ = hrrs->occupied();
|
||||
_occupied += occ;
|
||||
return false;
|
||||
}
|
||||
size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
|
||||
size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
|
||||
HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
|
||||
size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
|
||||
size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
|
||||
HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
|
||||
size_t occupied() { return _occupied; }
|
||||
|
||||
void print_summary_on(outputStream* out) {
|
||||
RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
|
||||
|
||||
out->print_cr("\n Current rem set statistics");
|
||||
out->print_cr(" Total per region rem sets sizes = "SIZE_FORMAT"K."
|
||||
" Max = "SIZE_FORMAT"K.",
|
||||
round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
(*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
|
||||
}
|
||||
|
||||
out->print_cr(" Static structures = "SIZE_FORMAT"K,"
|
||||
" free_lists = "SIZE_FORMAT"K.",
|
||||
round_to_K(HeapRegionRemSet::static_mem_size()),
|
||||
round_to_K(HeapRegionRemSet::fl_mem_size()));
|
||||
|
||||
out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
|
||||
total_cards_occupied());
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
(*current)->print_cards_occupied_info_on(out, total_cards_occupied());
|
||||
}
|
||||
|
||||
// Largest sized rem set region statistics
|
||||
HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
|
||||
out->print_cr(" Region with largest rem set = "HR_FORMAT", "
|
||||
"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
|
||||
HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
|
||||
round_to_K(rem_set->mem_size()),
|
||||
round_to_K(rem_set->occupied()));
|
||||
|
||||
// Strong code root statistics
|
||||
HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
|
||||
out->print_cr(" Total heap region code root sets sizes = "SIZE_FORMAT"K."
|
||||
" Max = "SIZE_FORMAT"K.",
|
||||
round_to_K(total_code_root_mem_sz()),
|
||||
round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
(*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
|
||||
}
|
||||
|
||||
out->print_cr(" "SIZE_FORMAT" code roots represented.",
|
||||
total_code_root_elems());
|
||||
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
|
||||
(*current)->print_code_root_elems_info_on(out, total_code_root_elems());
|
||||
}
|
||||
|
||||
out->print_cr(" Region with largest amount of code roots = "HR_FORMAT", "
|
||||
"size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
|
||||
HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
|
||||
round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
|
||||
round_to_K(max_code_root_rem_set->strong_code_roots_list_length()));
|
||||
}
|
||||
};
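HRRSStatsIter above classifies each heap region as young, humongous, free, or old, folds its remembered-set and code-root sizes into one RegionTypeCounter per class plus an "All" counter, and then prints every class as a share of the total. A compressed sketch of that accounting follows; TypeCounter and the sample data are simplified stand-ins for illustration, not HotSpot types.

#include <cstddef>
#include <cstdio>

struct TypeCounter {
  const char* name;
  size_t rs_bytes;
  size_t regions;
  explicit TypeCounter(const char* n) : name(n), rs_bytes(0), regions(0) {}

  void add(size_t bytes) { rs_bytes += bytes; ++regions; }

  // Print this class's share of the total, guarding against a zero total
  // the same way percent_of() does.
  void print(std::FILE* out, size_t total_bytes) const {
    double pct = total_bytes != 0 ? 100.0 * (double)rs_bytes / total_bytes : 0.0;
    std::fprintf(out, "  %8zuK (%5.1f%%) by %zu %s regions\n",
                 rs_bytes / 1024, pct, regions, name);
  }
};

int main() {
  TypeCounter young("Young"), old_gen("Old"), all("All");
  // Pretend each entry is one heap region: {is_old, rem set bytes}.
  const size_t samples[][2] = {{0, 4096}, {1, 131072}, {1, 65536}};
  for (const auto& s : samples) {
    (s[0] ? old_gen : young).add(s[1]);
    all.add(s[1]);
  }
  young.print(stdout, all.rs_bytes);
  old_gen.print(stdout, all.rs_bytes);
  return 0;
}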
|
||||
|
||||
double calc_percentage(size_t numerator, size_t denominator) {
|
||||
if (denominator != 0) {
|
||||
return (double)numerator / denominator * 100.0;
|
||||
} else {
|
||||
return 0.0f;
|
||||
}
|
||||
}
|
||||
|
||||
void G1RemSetSummary::print_on(outputStream* out) {
|
||||
out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
|
||||
out->print_cr("\n Recent concurrent refinement statistics");
|
||||
out->print_cr(" Processed "SIZE_FORMAT" cards",
|
||||
num_concurrent_refined_cards());
|
||||
out->print_cr(" Of %d completed buffers:", num_processed_buf_total());
|
||||
out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.",
|
||||
num_processed_buf_total(),
|
||||
calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
|
||||
percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
|
||||
out->print_cr(" %8d (%5.1f%%) by mutator threads.",
|
||||
num_processed_buf_mutator(),
|
||||
calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
|
||||
percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
|
||||
out->print_cr(" Did %d coarsenings.", num_coarsenings());
|
||||
out->print_cr(" Concurrent RS threads times (s)");
|
||||
out->print(" ");
|
||||
for (uint i = 0; i < _num_vtimes; i++) {
|
||||
@ -207,33 +346,5 @@ void G1RemSetSummary::print_on(outputStream* out) {
|
||||
|
||||
HRRSStatsIter blk;
|
||||
G1CollectedHeap::heap()->heap_region_iterate(&blk);
|
||||
// RemSet stats
|
||||
out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
|
||||
" Max = "SIZE_FORMAT"K.",
|
||||
blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
|
||||
out->print_cr(" Static structures = "SIZE_FORMAT"K,"
|
||||
" free_lists = "SIZE_FORMAT"K.",
|
||||
HeapRegionRemSet::static_mem_size() / K,
|
||||
HeapRegionRemSet::fl_mem_size() / K);
|
||||
out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
|
||||
blk.occupied());
|
||||
HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
|
||||
HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
|
||||
out->print_cr(" Max size region = "HR_FORMAT", "
|
||||
"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
|
||||
HR_FORMAT_PARAMS(max_rs_mem_sz_region),
|
||||
(max_rs_rem_set->mem_size() + K - 1)/K,
|
||||
(max_rs_rem_set->occupied() + K - 1)/K);
|
||||
out->print_cr(" Did %d coarsenings.", num_coarsenings());
|
||||
// Strong code root stats
|
||||
out->print_cr(" Total heap region code-root set sizes = "SIZE_FORMAT"K."
|
||||
" Max = "SIZE_FORMAT"K.",
|
||||
blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
|
||||
HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
|
||||
HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
|
||||
out->print_cr(" Max size region = "HR_FORMAT", "
|
||||
"size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
|
||||
HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
|
||||
(max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
|
||||
(max_code_root_rem_set->strong_code_roots_list_length()));
|
||||
blk.print_summary_on(out);
|
||||
}
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
void subtract_from(G1RemSetSummary* other);
|
||||
|
||||
// initialize and get the first sampling
|
||||
void initialize(G1RemSet* remset, uint num_workers);
|
||||
void initialize(G1RemSet* remset);
|
||||
|
||||
void print_on(outputStream* out);
|
||||
|
||||
|
@ -64,6 +64,27 @@ G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
|
||||
}
|
||||
}
|
||||
|
||||
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
// It's already processed
|
||||
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
|
||||
return false;
|
||||
}
|
||||
// Cached bit can be installed either on a clean card or on a claimed card.
|
||||
jbyte new_val = val;
|
||||
if (val == clean_card_val()) {
|
||||
new_val = (jbyte)deferred_card_val();
|
||||
} else {
|
||||
if (val & claimed_card_val()) {
|
||||
new_val = val | (jbyte)deferred_card_val();
|
||||
}
|
||||
}
|
||||
if (new_val != val) {
|
||||
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
|
||||
}
|
||||
return true;
|
||||
}
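mark_card_deferred() installs the deferred bit with at most one compare-and-swap and never retries, because losing the race only costs a duplicate entry in the update buffer. The stand-alone sketch below shows that wait-free update; the byte values are illustrative stand-ins, not the real CardValues constants from cardTableModRefBS.hpp, except that the clean card is assumed to be the all-bits-set value as in the actual card table.

#include <atomic>
#include <cstdint>

// Illustrative card states; not the real CardValues constants.
enum : int8_t { kClean = -1, kDirty = 0, kClaimed = 2, kDeferred = 4 };

bool mark_deferred(std::atomic<int8_t>& card) {
  int8_t val = card.load(std::memory_order_relaxed);
  // Already deferred: the all-ones clean value also has the bit set, so it
  // must be excluded explicitly, much like clean_card_mask_val() does above.
  if (val != kClean && (val & kDeferred) != 0) {
    return false;
  }
  int8_t new_val = val;
  if (val == kClean) {
    new_val = kDeferred;                    // clean -> deferred
  } else if ((val & kClaimed) != 0) {
    new_val = int8_t(val | kDeferred);      // claimed -> claimed + deferred
  }
  if (new_val != val) {
    // One CAS attempt, no retry loop: losing the race is harmless.
    card.compare_exchange_strong(val, new_val, std::memory_order_relaxed);
  }
  return true;
}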
|
||||
|
||||
G1SATBCardTableLoggingModRefBS::
|
||||
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
|
||||
int max_covered_regions) :
|
||||
|
@ -89,6 +89,42 @@ public:
|
||||
write_ref_array_pre_work(dst, count);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 Claimed and deferred bits are used together in G1 during the evacuation
 pause. These bits can have the following state transitions:
 1. The claimed bit can be put over any other card state, except that
    the "dirty -> dirty and claimed" transition is checked for in
    G1 code and is not used.
 2. The deferred bit can be set only if the previous state of the card
    was either clean or claimed. mark_card_deferred() is wait-free.
    We do not care whether the operation succeeds, because if it does not,
    it will only result in a duplicate entry in the update buffer because
    of the "cache miss", so it is not worth spinning.
 */
|
||||
|
||||
bool is_card_claimed(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
|
||||
}
|
||||
|
||||
void set_card_claimed(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
if (val == clean_card_val()) {
|
||||
val = (jbyte)claimed_card_val();
|
||||
} else {
|
||||
val |= (jbyte)claimed_card_val();
|
||||
}
|
||||
_byte_map[card_index] = val;
|
||||
}
|
||||
|
||||
bool mark_card_deferred(size_t card_index);
|
||||
|
||||
bool is_card_deferred(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
// Adds card-table logging to the post-barrier.
|
||||
|
@ -40,10 +40,8 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {
|
||||
|
||||
void initialize_flags() {
|
||||
// Do basic sizing work
|
||||
this->TwoGenerationCollectorPolicy::initialize_flags();
|
||||
TwoGenerationCollectorPolicy::initialize_flags();
|
||||
|
||||
// If the user hasn't explicitly set the number of worker
|
||||
// threads, set the count.
|
||||
assert(UseSerialGC ||
|
||||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
|
||||
(ParallelGCThreads > 0),
|
||||
|
@ -23,7 +23,6 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
|
@ -53,7 +53,6 @@
|
||||
|
||||
// Forward decls
|
||||
class elapsedTimer;
|
||||
class GenerationSizer;
|
||||
|
||||
class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
|
||||
friend class PSGCAdaptivePolicyCounters;
|
||||
|
@ -26,7 +26,6 @@
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
|
||||
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
|
||||
|
@ -423,60 +423,6 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
|
||||
inline_write_ref_field(field, newVal);
|
||||
}
|
||||
|
||||
/*
|
||||
Claimed and deferred bits are used together in G1 during the evacuation
|
||||
pause. These bits can have the following state transitions:
|
||||
1. The claimed bit can be put over any other card state. Except that
|
||||
the "dirty -> dirty and claimed" transition is checked for in
|
||||
G1 code and is not used.
|
||||
2. Deferred bit can be set only if the previous state of the card
|
||||
was either clean or claimed. mark_card_deferred() is wait-free.
|
||||
We do not care if the operation is be successful because if
|
||||
it does not it will only result in duplicate entry in the update
|
||||
buffer because of the "cache-miss". So it's not worth spinning.
|
||||
*/
|
||||
|
||||
|
||||
bool CardTableModRefBS::claim_card(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
|
||||
while (val == clean_card_val() ||
|
||||
(val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
|
||||
jbyte new_val = val;
|
||||
if (val == clean_card_val()) {
|
||||
new_val = (jbyte)claimed_card_val();
|
||||
} else {
|
||||
new_val = val | (jbyte)claimed_card_val();
|
||||
}
|
||||
jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
|
||||
if (res == val) {
|
||||
return true;
|
||||
}
|
||||
val = res;
|
||||
}
|
||||
return false;
|
||||
}
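claim_card(), which this hunk removes from the shared card table, is the retrying counterpart of the pattern above: keep CAS-ing until either this thread installs the claimed bit or another thread is seen to hold it. A hedged sketch of that loop follows, again with illustrative byte values rather than the real constants.

#include <atomic>
#include <cstdint>

// Illustrative states, as in the earlier sketch; not the real constants.
enum : int8_t { kCleanCard = -1, kClaimedBit = 2 };

bool claim(std::atomic<int8_t>& card) {
  int8_t val = card.load(std::memory_order_relaxed);
  // Loop while the card is clean or nobody has installed the claimed bit yet.
  while (val == kCleanCard || (val & kClaimedBit) == 0) {
    int8_t new_val = (val == kCleanCard) ? int8_t(kClaimedBit)
                                         : int8_t(val | kClaimedBit);
    if (card.compare_exchange_weak(val, new_val, std::memory_order_relaxed)) {
      return true;              // this thread installed the claimed bit
    }
    // A failed CAS reloads the current value into val; re-evaluate the loop.
  }
  return false;                 // another thread already claimed the card
}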
|
||||
|
||||
bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
// It's already processed
|
||||
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
|
||||
return false;
|
||||
}
|
||||
// Cached bit can be installed either on a clean card or on a claimed card.
|
||||
jbyte new_val = val;
|
||||
if (val == clean_card_val()) {
|
||||
new_val = (jbyte)deferred_card_val();
|
||||
} else {
|
||||
if (val & claimed_card_val()) {
|
||||
new_val = val | (jbyte)deferred_card_val();
|
||||
}
|
||||
}
|
||||
if (new_val != val) {
|
||||
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
|
||||
MemRegion mr,
|
||||
|
@ -339,34 +339,10 @@ public:
|
||||
_byte_map[card_index] = dirty_card_val();
|
||||
}
|
||||
|
||||
bool is_card_claimed(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
|
||||
}
|
||||
|
||||
void set_card_claimed(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
if (val == clean_card_val()) {
|
||||
val = (jbyte)claimed_card_val();
|
||||
} else {
|
||||
val |= (jbyte)claimed_card_val();
|
||||
}
|
||||
_byte_map[card_index] = val;
|
||||
}
|
||||
|
||||
bool claim_card(size_t card_index);
|
||||
|
||||
bool is_card_clean(size_t card_index) {
|
||||
return _byte_map[card_index] == clean_card_val();
|
||||
}
|
||||
|
||||
bool is_card_deferred(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
|
||||
}
|
||||
|
||||
bool mark_card_deferred(size_t card_index);
|
||||
|
||||
// Card marking array base (adjusted for heap low boundary)
|
||||
// This would be the 0th element of _byte_map, if the heap started at 0x0.
|
||||
// But since the heap starts at some higher address, this points to somewhere
|
||||
|
@ -64,19 +64,21 @@ void CollectorPolicy::initialize_flags() {
|
||||
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
|
||||
}
|
||||
|
||||
if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
|
||||
FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
|
||||
restricted_align_down(MaxMetaspaceSize, max_alignment()));
|
||||
}
|
||||
// Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
// clobber the record of whether MaxMetaspaceSize was set on the command line.
|
||||
// This information is needed later to conform to the specification of the
|
||||
// java.lang.management.MemoryUsage API.
|
||||
//
|
||||
// Ideally, we would be able to set the default value of MaxMetaspaceSize in
|
||||
// globals.hpp to the aligned value, but this is not possible, since the
|
||||
// alignment depends on other flags being parsed.
|
||||
MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
|
||||
|
||||
if (MetaspaceSize > MaxMetaspaceSize) {
|
||||
FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
|
||||
MetaspaceSize = MaxMetaspaceSize;
|
||||
}
|
||||
|
||||
if (!is_size_aligned(MetaspaceSize, min_alignment())) {
|
||||
FLAG_SET_ERGO(uintx, MetaspaceSize,
|
||||
restricted_align_down(MetaspaceSize, min_alignment()));
|
||||
}
|
||||
MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
|
||||
|
||||
assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
|
||||
|
||||
@ -135,15 +137,8 @@ bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
|
||||
|
||||
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
|
||||
int max_covered_regions) {
|
||||
switch (rem_set_name()) {
|
||||
case GenRemSet::CardTable: {
|
||||
CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
|
||||
return res;
|
||||
}
|
||||
default:
|
||||
guarantee(false, "unrecognized GenRemSet::Name");
|
||||
return NULL;
|
||||
}
|
||||
assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
|
||||
return new CardTableRS(whole_heap, max_covered_regions);
|
||||
}
|
||||
|
||||
void CollectorPolicy::cleared_all_soft_refs() {
|
||||
|
@ -32,13 +32,8 @@
|
||||
// enumeration.)
|
||||
|
||||
uintx GenRemSet::max_alignment_constraint(Name nm) {
|
||||
switch (nm) {
|
||||
case GenRemSet::CardTable:
|
||||
return CardTableRS::ct_max_alignment_constraint();
|
||||
default:
|
||||
guarantee(false, "Unrecognized GenRemSet type.");
|
||||
return (0); // Make Windows compiler happy
|
||||
}
|
||||
assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
|
||||
return CardTableRS::ct_max_alignment_constraint();
|
||||
}
|
||||
|
||||
class HasAccumulatedModifiedOopsClosure : public KlassClosure {
|
||||
|
@ -3104,7 +3104,7 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
|
||||
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
|
||||
// DumpSharedSpaces doesn't use class metadata area (yet)
|
||||
// Also, don't use class_vsm() unless UseCompressedClassPointers is true.
|
||||
if (mdtype == ClassType && using_class_space()) {
|
||||
if (is_class_space_allocation(mdtype)) {
|
||||
return class_vsm()->allocate(word_size);
|
||||
} else {
|
||||
return vsm()->allocate(word_size);
|
||||
@ -3252,8 +3252,8 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
|
||||
MetaspaceAux::dump(gclog_or_tty);
|
||||
}
|
||||
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
|
||||
const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
|
||||
"Metadata space";
|
||||
const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
|
||||
"Metadata space";
|
||||
report_java_out_of_memory(space_string);
|
||||
|
||||
if (JvmtiExport::should_post_resource_exhausted()) {
|
||||
@ -3261,7 +3261,7 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
|
||||
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
|
||||
space_string);
|
||||
}
|
||||
if (mdtype == ClassType) {
|
||||
if (is_class_space_allocation(mdtype)) {
|
||||
THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
|
||||
} else {
|
||||
THROW_OOP_0(Universe::out_of_memory_error_metaspace());
|
||||
|
@ -235,6 +235,9 @@ class Metaspace : public CHeapObj<mtClass> {
|
||||
return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
|
||||
}
|
||||
|
||||
static bool is_class_space_allocation(MetadataType mdType) {
|
||||
return mdType == ClassType && using_class_space();
|
||||
}
|
||||
};
|
||||
|
||||
class MetaspaceAux : AllStatic {
|
||||
|
@ -901,16 +901,6 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
|
||||
// This function must not hit a safepoint!
|
||||
address Method::verified_code_entry() {
|
||||
debug_only(No_Safepoint_Verifier nsv;)
|
||||
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
|
||||
if (code == NULL && UseCodeCacheFlushing) {
|
||||
nmethod *saved_code = CodeCache::reanimate_saved_code(this);
|
||||
if (saved_code != NULL) {
|
||||
methodHandle method(this);
|
||||
assert( ! saved_code->is_osr_method(), "should not get here for osr" );
|
||||
set_code( method, saved_code );
|
||||
}
|
||||
}
|
||||
|
||||
assert(_from_compiled_entry != NULL, "must be set");
|
||||
return _from_compiled_entry;
|
||||
}
|
||||
|
@ -636,7 +636,9 @@
|
||||
\
|
||||
diagnostic(bool, OptimizeExpensiveOps, true, \
|
||||
"Find best control for expensive operations") \
|
||||
|
||||
\
|
||||
product(bool, UseMathExactIntrinsics, true, \
|
||||
"Enables intrinsification of various java.lang.Math funcitons")
|
||||
|
||||
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
|
||||
|
||||
|
@ -65,6 +65,8 @@ class CallGenerator : public ResourceObj {
|
||||
virtual bool is_predicted() const { return false; }
|
||||
// is_trap: Does not return to the caller. (E.g., uncommon trap.)
|
||||
virtual bool is_trap() const { return false; }
|
||||
// does_virtual_dispatch: Should try inlining as normal method first.
|
||||
virtual bool does_virtual_dispatch() const { return false; }
|
||||
|
||||
// is_late_inline: supports conversion of call into an inline
|
||||
virtual bool is_late_inline() const { return false; }
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "opto/loopnode.hpp"
|
||||
#include "opto/machnode.hpp"
|
||||
#include "opto/memnode.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/multnode.hpp"
|
||||
#include "opto/node.hpp"
|
||||
|
@ -29,6 +29,7 @@ macro(AbsD)
|
||||
macro(AbsF)
|
||||
macro(AbsI)
|
||||
macro(AddD)
|
||||
macro(AddExactI)
|
||||
macro(AddF)
|
||||
macro(AddI)
|
||||
macro(AddL)
|
||||
@ -133,6 +134,7 @@ macro(EncodePKlass)
|
||||
macro(ExpD)
|
||||
macro(FastLock)
|
||||
macro(FastUnlock)
|
||||
macro(FlagsProj)
|
||||
macro(Goto)
|
||||
macro(Halt)
|
||||
macro(If)
|
||||
@ -167,6 +169,7 @@ macro(Loop)
|
||||
macro(LoopLimit)
|
||||
macro(Mach)
|
||||
macro(MachProj)
|
||||
macro(MathExact)
|
||||
macro(MaxI)
|
||||
macro(MemBarAcquire)
|
||||
macro(MemBarAcquireLock)
|
||||
|
@ -110,6 +110,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
||||
// then we return it as the inlined version of the call.
|
||||
// We do this before the strict f.p. check below because the
|
||||
// intrinsics handle strict f.p. correctly.
|
||||
CallGenerator* cg_intrinsic = NULL;
|
||||
if (allow_inline && allow_intrinsics) {
|
||||
CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
|
||||
if (cg != NULL) {
|
||||
@ -121,7 +122,16 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
||||
cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
|
||||
}
|
||||
}
|
||||
return cg;
|
||||
|
||||
// If the intrinsic does the virtual dispatch, we try to use the type profile
// first, and hopefully inline it as the regular virtual call below.
// We will retry the intrinsic if nothing has claimed it afterwards.
|
||||
if (cg->does_virtual_dispatch()) {
|
||||
cg_intrinsic = cg;
|
||||
cg = NULL;
|
||||
} else {
|
||||
return cg;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -266,6 +276,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
||||
}
|
||||
}
|
||||
|
||||
// Nothing claimed the intrinsic; go with straightforward inlining
// for the already discovered intrinsic.
|
||||
if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
|
||||
assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
|
||||
return cg_intrinsic;
|
||||
}
|
||||
|
||||
// There was no special inlining tactic, or it bailed out.
|
||||
// Use a more generic tactic, like a simple call.
|
||||
if (call_does_dispatch) {
|
||||
|
@ -3849,9 +3849,9 @@ void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
|
||||
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
|
||||
false, NULL, 0);
|
||||
const TypePtr* value_field_type = string_type->add_offset(value_offset);
|
||||
int value_field_idx = C->get_alias_index(value_field_type);
|
||||
store_to_memory(ctrl, basic_plus_adr(str, value_offset),
|
||||
value, T_OBJECT, value_field_idx);
|
||||
|
||||
store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
|
||||
value, TypeAryPtr::CHARS, T_OBJECT);
|
||||
}
|
||||
|
||||
void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
|
||||
|
@ -76,6 +76,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
|
||||
if( !i1->is_Bool() ) return NULL;
|
||||
BoolNode *b = i1->as_Bool();
|
||||
Node *cmp = b->in(1);
|
||||
if( cmp->is_FlagsProj() ) return NULL;
|
||||
if( !cmp->is_Cmp() ) return NULL;
|
||||
i1 = cmp->in(1);
|
||||
if( i1 == NULL || !i1->is_Phi() ) return NULL;
|
||||
|
@ -472,6 +472,13 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
|
||||
break;
|
||||
}
|
||||
|
||||
// For nodes that produce a FlagsProj, make the node adjacent to the
|
||||
// use of the FlagsProj
|
||||
if (use->is_FlagsProj() && get_block_for_node(use) == block) {
|
||||
found_machif = true;
|
||||
break;
|
||||
}
|
||||
|
||||
// More than this instruction pending for successor to be ready,
|
||||
// don't choose this if other opportunities are ready
|
||||
if (ready_cnt.at(use->_idx) > 1)
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "opto/callGenerator.hpp"
|
||||
#include "opto/cfgnode.hpp"
|
||||
#include "opto/idealKit.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/parse.hpp"
|
||||
#include "opto/runtime.hpp"
|
||||
@ -46,19 +47,22 @@ class LibraryIntrinsic : public InlineCallGenerator {
|
||||
private:
|
||||
bool _is_virtual;
|
||||
bool _is_predicted;
|
||||
bool _does_virtual_dispatch;
|
||||
vmIntrinsics::ID _intrinsic_id;
|
||||
|
||||
public:
|
||||
LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, vmIntrinsics::ID id)
|
||||
LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
|
||||
: InlineCallGenerator(m),
|
||||
_is_virtual(is_virtual),
|
||||
_is_predicted(is_predicted),
|
||||
_does_virtual_dispatch(does_virtual_dispatch),
|
||||
_intrinsic_id(id)
|
||||
{
|
||||
}
|
||||
virtual bool is_intrinsic() const { return true; }
|
||||
virtual bool is_virtual() const { return _is_virtual; }
|
||||
virtual bool is_predicted() const { return _is_predicted; }
|
||||
virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
|
||||
virtual JVMState* generate(JVMState* jvms);
|
||||
virtual Node* generate_predicate(JVMState* jvms);
|
||||
vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
|
||||
@ -199,6 +203,8 @@ class LibraryCallKit : public GraphKit {
|
||||
bool inline_math_native(vmIntrinsics::ID id);
|
||||
bool inline_trig(vmIntrinsics::ID id);
|
||||
bool inline_math(vmIntrinsics::ID id);
|
||||
bool inline_math_mathExact(Node* math);
|
||||
bool inline_math_addExact();
|
||||
bool inline_exp();
|
||||
bool inline_pow();
|
||||
void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
|
||||
@ -352,6 +358,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
|
||||
}
|
||||
|
||||
bool is_predicted = false;
|
||||
bool does_virtual_dispatch = false;
|
||||
|
||||
switch (id) {
|
||||
case vmIntrinsics::_compareTo:
|
||||
@ -378,8 +385,10 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
|
||||
break;
|
||||
case vmIntrinsics::_hashCode:
|
||||
if (!InlineObjectHash) return NULL;
|
||||
does_virtual_dispatch = true;
|
||||
break;
|
||||
case vmIntrinsics::_clone:
|
||||
does_virtual_dispatch = true;
|
||||
case vmIntrinsics::_copyOf:
|
||||
case vmIntrinsics::_copyOfRange:
|
||||
if (!InlineObjectCopy) return NULL;
|
||||
@ -498,6 +507,15 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
|
||||
if (!UseCRC32Intrinsics) return NULL;
|
||||
break;
|
||||
|
||||
case vmIntrinsics::_addExact:
|
||||
if (!Matcher::match_rule_supported(Op_AddExactI)) {
|
||||
return NULL;
|
||||
}
|
||||
if (!UseMathExactIntrinsics) {
|
||||
return NULL;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
|
||||
assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
|
||||
@ -529,7 +547,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
|
||||
if (!InlineUnsafeOps) return NULL;
|
||||
}
|
||||
|
||||
return new LibraryIntrinsic(m, is_virtual, is_predicted, (vmIntrinsics::ID) id);
|
||||
return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
|
||||
}
|
||||
|
||||
//----------------------register_library_intrinsics-----------------------
|
||||
@ -668,6 +686,8 @@ bool LibraryCallKit::try_to_inline() {
|
||||
case vmIntrinsics::_min:
|
||||
case vmIntrinsics::_max: return inline_min_max(intrinsic_id());
|
||||
|
||||
case vmIntrinsics::_addExact: return inline_math_addExact();
|
||||
|
||||
case vmIntrinsics::_arraycopy: return inline_arraycopy();
|
||||
|
||||
case vmIntrinsics::_compareTo: return inline_string_compareTo();
|
||||
@ -1911,6 +1931,45 @@ bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LibraryCallKit::inline_math_mathExact(Node* math) {
|
||||
Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
|
||||
Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
|
||||
|
||||
Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
|
||||
IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
|
||||
Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
|
||||
Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
|
||||
|
||||
{
|
||||
PreserveJVMState pjvms(this);
|
||||
PreserveReexecuteState preexecs(this);
|
||||
jvms()->set_should_reexecute(true);
|
||||
|
||||
set_control(slow_path);
|
||||
set_i_o(i_o());
|
||||
|
||||
uncommon_trap(Deoptimization::Reason_intrinsic,
|
||||
Deoptimization::Action_none);
|
||||
}
|
||||
|
||||
set_control(fast_path);
|
||||
set_result(result);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LibraryCallKit::inline_math_addExact() {
|
||||
Node* arg1 = argument(0);
|
||||
Node* arg2 = argument(1);
|
||||
|
||||
Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
|
||||
if (add->Opcode() == Op_AddExactI) {
|
||||
return inline_math_mathExact(add);
|
||||
} else {
|
||||
set_result(add);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
Node*
|
||||
LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
|
||||
// These are the candidate return value:
|
||||
|
@ -776,6 +776,9 @@ bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
|
||||
continue; // not RC
|
||||
|
||||
Node *cmp = bol->in(1);
|
||||
if (cmp->is_FlagsProj()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Node *rc_exp = cmp->in(1);
|
||||
Node *limit = cmp->in(2);
|
||||
|
@ -2355,7 +2355,8 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
|
||||
opc == Op_Catch ||
|
||||
opc == Op_CatchProj ||
|
||||
opc == Op_Jump ||
|
||||
opc == Op_JumpProj) {
|
||||
opc == Op_JumpProj ||
|
||||
opc == Op_FlagsProj) {
|
||||
#if !defined(PRODUCT)
|
||||
if (TracePartialPeeling) {
|
||||
tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
|
||||
|
@ -1964,6 +1964,7 @@ void Matcher::find_shared( Node *n ) {
|
||||
case Op_Catch:
|
||||
case Op_CatchProj:
|
||||
case Op_CProj:
|
||||
case Op_FlagsProj:
|
||||
case Op_JumpProj:
|
||||
case Op_JProj:
|
||||
case Op_NeverBranch:
|
||||
|
@ -337,6 +337,9 @@ public:
|
||||
// Register for MODL projection of divmodL
|
||||
static RegMask modL_proj_mask();
|
||||
|
||||
static const RegMask mathExactI_result_proj_mask();
|
||||
static const RegMask mathExactI_flags_proj_mask();
|
||||
|
||||
// Use hardware DIV instruction when it is faster than
|
||||
// a code which use multiply for division by constant.
|
||||
static bool use_asm_for_ldiv_by_con( jlong divisor );
|
||||
|
hotspot/src/share/vm/opto/mathexactnode.cpp (new file, 143 lines)
@ -0,0 +1,143 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "opto/addnode.hpp"
|
||||
#include "opto/machnode.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/matcher.hpp"
|
||||
#include "opto/subnode.hpp"
|
||||
|
||||
MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) {
|
||||
init_req(0, ctrl);
|
||||
init_req(1, n1);
|
||||
init_req(2, n2);
|
||||
}
|
||||
|
||||
Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) {
|
||||
uint ideal_reg = proj->ideal_reg();
|
||||
RegMask rm;
|
||||
if (proj->_con == result_proj_node) {
|
||||
rm = m->mathExactI_result_proj_mask();
|
||||
} else {
|
||||
assert(proj->_con == flags_proj_node, "must be result or flags");
|
||||
assert(ideal_reg == Op_RegFlags, "sanity");
|
||||
rm = m->mathExactI_flags_proj_mask();
|
||||
}
|
||||
return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg);
|
||||
}
|
||||
|
||||
// If the MathExactNode won't overflow, we have to replace the
// FlagsProjNode and ProjNode that are generated by the MathExactNode
|
||||
Node* MathExactNode::no_overflow(PhaseGVN *phase, Node* new_result) {
|
||||
PhaseIterGVN *igvn = phase->is_IterGVN();
|
||||
if (igvn) {
|
||||
ProjNode* result = result_node();
|
||||
ProjNode* flags = flags_node();
|
||||
|
||||
if (result != NULL) {
|
||||
igvn->replace_node(result, new_result);
|
||||
}
|
||||
|
||||
if (flags != NULL) {
|
||||
BoolNode* bolnode = (BoolNode *) flags->unique_out();
|
||||
switch (bolnode->_test._test) {
|
||||
case BoolTest::overflow:
|
||||
// if the check is for overflow - never taken
|
||||
igvn->replace_node(bolnode, phase->intcon(0));
|
||||
break;
|
||||
case BoolTest::no_overflow:
|
||||
// if the check is for no overflow - always taken
|
||||
igvn->replace_node(bolnode, phase->intcon(1));
|
||||
break;
|
||||
default:
|
||||
fatal("Unexpected value of BoolTest");
|
||||
break;
|
||||
}
|
||||
flags->del_req(0);
|
||||
}
|
||||
}
|
||||
return new_result;
|
||||
}
|
||||
|
||||
Node *AddExactINode::Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
Node *arg1 = in(1);
|
||||
Node *arg2 = in(2);
|
||||
|
||||
const Type* type1 = phase->type(arg1);
|
||||
const Type* type2 = phase->type(arg2);
|
||||
|
||||
if (type1 != Type::TOP && type1->singleton() &&
|
||||
type2 != Type::TOP && type2->singleton()) {
|
||||
jint val1 = arg1->get_int();
|
||||
jint val2 = arg2->get_int();
|
||||
jint result = val1 + val2;
|
||||
// Hacker's Delight 2-12: overflow iff both arguments have the opposite sign of the result
|
||||
if ( (((val1 ^ result) & (val2 ^ result)) >= 0)) {
|
||||
Node* con_result = ConINode::make(phase->C, result);
|
||||
return no_overflow(phase, con_result);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (type1 == TypeInt::ZERO) { // (Add 0 x) == x
|
||||
Node* add_result = new (phase->C) AddINode(arg1, arg2);
|
||||
return no_overflow(phase, add_result);
|
||||
}
|
||||
|
||||
if (type2 == TypeInt::ZERO) { // (Add x 0) == x
|
||||
Node* add_result = new (phase->C) AddINode(arg1, arg2);
|
||||
return no_overflow(phase, add_result);
|
||||
}
|
||||
|
||||
if (type2->singleton()) {
|
||||
return NULL; // no change - keep constant on the right
|
||||
}
|
||||
|
||||
if (type1->singleton()) {
|
||||
// Make it x + Constant - move constant to the right
|
||||
swap_edges(1, 2);
|
||||
return this;
|
||||
}
|
||||
|
||||
if (arg2->is_Load()) {
|
||||
return NULL; // no change - keep load on the right
|
||||
}
|
||||
|
||||
if (arg1->is_Load()) {
|
||||
// Make it x + Load - move load to the right
|
||||
swap_edges(1, 2);
|
||||
return this;
|
||||
}
|
||||
|
||||
if (arg1->_idx > arg2->_idx) {
|
||||
// Sort the edges
|
||||
swap_edges(1, 2);
|
||||
return this;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
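The constant-folding branch above relies on the Hacker's Delight 2-12 identity: a signed 32-bit addition overflowed exactly when both operands differ in sign from the wrapped result. Here is a small self-contained check of that identity; nothing in it is HotSpot-specific and the helper name is made up.

#include <cassert>
#include <cstdint>

static bool add_overflows(int32_t a, int32_t b) {
  int32_t r = int32_t(uint32_t(a) + uint32_t(b));  // wrap-around (two's complement) add
  return ((a ^ r) & (b ^ r)) < 0;                  // sign bit set => overflow
}

int main() {
  assert(!add_overflows(1, 2));
  assert(add_overflows(INT32_MAX, 1));   // 0x7fffffff + 1 wraps negative
  assert(add_overflows(INT32_MIN, -1));  // most negative - 1 wraps positive
  assert(!add_overflows(-5, 3));
  return 0;
}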
|
||||
|
hotspot/src/share/vm/opto/mathexactnode.hpp (new file, 81 lines)
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OPTO_MATHEXACTNODE_HPP
|
||||
#define SHARE_VM_OPTO_MATHEXACTNODE_HPP
|
||||
|
||||
#include "opto/multnode.hpp"
|
||||
#include "opto/node.hpp"
|
||||
#include "opto/type.hpp"
|
||||
|
||||
class Node;
|
||||
|
||||
class PhaseGVN;
|
||||
class PhaseTransform;
|
||||
|
||||
class MathExactNode : public MultiNode {
|
||||
public:
|
||||
MathExactNode(Node* ctrl, Node* in1, Node* in2);
|
||||
enum {
|
||||
result_proj_node = 0,
|
||||
flags_proj_node = 1
|
||||
};
|
||||
virtual int Opcode() const;
|
||||
virtual Node* Identity(PhaseTransform* phase) { return this; }
|
||||
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; }
|
||||
virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); }
|
||||
virtual uint hash() const { return Node::hash(); }
|
||||
virtual bool is_CFG() const { return false; }
|
||||
virtual uint ideal_reg() const { return NotAMachineReg; }
|
||||
|
||||
ProjNode* result_node() { return proj_out(result_proj_node); }
|
||||
ProjNode* flags_node() { return proj_out(flags_proj_node); }
|
||||
protected:
|
||||
Node* no_overflow(PhaseGVN *phase, Node* new_result);
|
||||
};
|
||||
|
||||
class AddExactINode : public MathExactNode {
|
||||
public:
|
||||
AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {}
|
||||
virtual int Opcode() const;
|
||||
virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; }
|
||||
virtual Node* match(const ProjNode* proj, const Matcher* m);
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
|
||||
};
|
||||
|
||||
class FlagsProjNode : public ProjNode {
|
||||
public:
|
||||
FlagsProjNode(Node* src, uint con) : ProjNode(src, con) {
|
||||
init_class_id(Class_FlagsProj);
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
virtual bool is_CFG() const { return false; }
|
||||
virtual const Type* bottom_type() const { return TypeInt::CC; }
|
||||
virtual uint ideal_reg() const { return Op_RegFlags; }
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "opto/callnode.hpp"
|
||||
#include "opto/matcher.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/multnode.hpp"
|
||||
#include "opto/opcodes.hpp"
|
||||
#include "opto/phaseX.hpp"
|
||||
@ -46,15 +47,21 @@ ProjNode* MultiNode::proj_out(uint which_proj) const {
|
||||
assert(Opcode() != Op_If || outcnt() == 2, "bad if #1");
|
||||
for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
|
||||
Node *p = fast_out(i);
|
||||
if( !p->is_Proj() ) {
|
||||
if (p->is_Proj()) {
|
||||
ProjNode *proj = p->as_Proj();
|
||||
if (proj->_con == which_proj) {
|
||||
assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
|
||||
return proj;
|
||||
}
|
||||
} else if (p->is_FlagsProj()) {
|
||||
FlagsProjNode *proj = p->as_FlagsProj();
|
||||
if (proj->_con == which_proj) {
|
||||
return proj;
|
||||
}
|
||||
} else {
|
||||
assert(p == this && this->is_Start(), "else must be proj");
|
||||
continue;
|
||||
}
|
||||
ProjNode *proj = p->as_Proj();
|
||||
if( proj->_con == which_proj ) {
|
||||
assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
|
||||
return proj;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
@ -69,6 +69,7 @@ class EncodePNode;
|
||||
class EncodePKlassNode;
|
||||
class FastLockNode;
|
||||
class FastUnlockNode;
|
||||
class FlagsProjNode;
|
||||
class IfNode;
|
||||
class IfFalseNode;
|
||||
class IfTrueNode;
|
||||
@ -623,6 +624,7 @@ public:
|
||||
DEFINE_CLASS_ID(Cmp, Sub, 0)
|
||||
DEFINE_CLASS_ID(FastLock, Cmp, 0)
|
||||
DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
|
||||
DEFINE_CLASS_ID(FlagsProj, Cmp, 2)
|
||||
|
||||
DEFINE_CLASS_ID(MergeMem, Node, 7)
|
||||
DEFINE_CLASS_ID(Bool, Node, 8)
|
||||
@ -726,6 +728,7 @@ public:
|
||||
DEFINE_CLASS_QUERY(EncodePKlass)
|
||||
DEFINE_CLASS_QUERY(FastLock)
|
||||
DEFINE_CLASS_QUERY(FastUnlock)
|
||||
DEFINE_CLASS_QUERY(FlagsProj)
|
||||
DEFINE_CLASS_QUERY(If)
|
||||
DEFINE_CLASS_QUERY(IfFalse)
|
||||
DEFINE_CLASS_QUERY(IfTrue)
|
||||
|
@ -1064,7 +1064,7 @@ const Type *BoolTest::cc2logical( const Type *CC ) const {
|
||||
// Print special per-node info
|
||||
#ifndef PRODUCT
|
||||
void BoolTest::dump_on(outputStream *st) const {
|
||||
const char *msg[] = {"eq","gt","??","lt","ne","le","??","ge"};
|
||||
const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"};
|
||||
st->print(msg[_test]);
|
||||
}
|
||||
#endif
|
||||
@ -1126,7 +1126,7 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
Node *cmp = in(1);
|
||||
if( !cmp->is_Sub() ) return NULL;
|
||||
int cop = cmp->Opcode();
|
||||
if( cop == Op_FastLock || cop == Op_FastUnlock ) return NULL;
|
||||
if( cop == Op_FastLock || cop == Op_FastUnlock || cop == Op_FlagsProj) return NULL;
|
||||
Node *cmp1 = cmp->in(1);
|
||||
Node *cmp2 = cmp->in(2);
|
||||
if( !cmp1 ) return NULL;
|
||||
|
@ -263,16 +263,16 @@ public:
|
||||
// We pick the values as 3 bits; the low order 2 bits we compare against the
|
||||
// condition codes, the high bit flips the sense of the result.
|
||||
struct BoolTest VALUE_OBJ_CLASS_SPEC {
|
||||
enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, illegal = 8 };
|
||||
enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, overflow = 2, no_overflow = 6, illegal = 8 };
|
||||
mask _test;
|
||||
BoolTest( mask btm ) : _test(btm) {}
|
||||
const Type *cc2logical( const Type *CC ) const;
|
||||
// Commute the test. I use a small table lookup. The table is created as
|
||||
// a simple char array where each element is the ASCII version of a 'mask'
|
||||
// enum from above.
|
||||
mask commute( ) const { return mask("038147858"[_test]-'0'); }
|
||||
mask commute( ) const { return mask("032147658"[_test]-'0'); }
|
||||
mask negate( ) const { return mask(_test^4); }
|
||||
bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le); }
|
||||
bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); }
|
||||
#ifndef PRODUCT
|
||||
void dump_on(outputStream *st) const;
|
||||
#endif
|
||||
|
@ -430,6 +430,11 @@ void Type::Initialize_shared(Compile* current) {
|
||||
longpair[1] = TypeLong::LONG;
|
||||
TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair);
|
||||
|
||||
const Type **intccpair = TypeTuple::fields(2);
|
||||
intccpair[0] = TypeInt::INT;
|
||||
intccpair[1] = TypeInt::CC;
|
||||
TypeTuple::INT_CC_PAIR = TypeTuple::make(2, intccpair);
|
||||
|
||||
_const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM;
|
||||
_const_basic_type[T_NARROWKLASS] = Type::BOTTOM;
|
||||
_const_basic_type[T_BOOLEAN] = TypeInt::BOOL;
|
||||
@ -1646,6 +1651,7 @@ const TypeTuple *TypeTuple::STORECONDITIONAL;
|
||||
const TypeTuple *TypeTuple::START_I2C;
|
||||
const TypeTuple *TypeTuple::INT_PAIR;
|
||||
const TypeTuple *TypeTuple::LONG_PAIR;
|
||||
const TypeTuple *TypeTuple::INT_CC_PAIR;
|
||||
|
||||
|
||||
//------------------------------make-------------------------------------------
|
||||
|
@ -584,6 +584,7 @@ public:
|
||||
static const TypeTuple *START_I2C;
|
||||
static const TypeTuple *INT_PAIR;
|
||||
static const TypeTuple *LONG_PAIR;
|
||||
static const TypeTuple *INT_CC_PAIR;
|
||||
#ifndef PRODUCT
|
||||
virtual void dump2( Dict &d, uint, outputStream *st ) const; // Specialized per-Type dumping
|
||||
#endif
|
||||
|
@ -3984,13 +3984,13 @@ extern "C" {
|
||||
|
||||
JVM_LEAF(jboolean, JVM_AccessVMBooleanFlag(const char* name, jboolean* value, jboolean is_get))
|
||||
JVMWrapper("JVM_AccessBoolVMFlag");
|
||||
return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, INTERNAL);
|
||||
return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, Flag::INTERNAL);
|
||||
JVM_END
|
||||
|
||||
JVM_LEAF(jboolean, JVM_AccessVMIntFlag(const char* name, jint* value, jboolean is_get))
|
||||
JVMWrapper("JVM_AccessVMIntFlag");
|
||||
intx v;
|
||||
jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, INTERNAL);
|
||||
jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, Flag::INTERNAL);
|
||||
*value = (jint)v;
|
||||
return result;
|
||||
JVM_END
|
||||
|
@ -625,11 +625,11 @@ void Arguments::describe_range_error(ArgsRange errcode) {
|
||||
}
|
||||
}
|
||||
|
||||
static bool set_bool_flag(char* name, bool value, FlagValueOrigin origin) {
|
||||
static bool set_bool_flag(char* name, bool value, Flag::Flags origin) {
|
||||
return CommandLineFlags::boolAtPut(name, &value, origin);
|
||||
}
|
||||
|
||||
static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
|
||||
static bool set_fp_numeric_flag(char* name, char* value, Flag::Flags origin) {
|
||||
double v;
|
||||
if (sscanf(value, "%lf", &v) != 1) {
|
||||
return false;
|
||||
@ -641,7 +641,7 @@ static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
|
||||
static bool set_numeric_flag(char* name, char* value, Flag::Flags origin) {
|
||||
julong v;
|
||||
intx intx_v;
|
||||
bool is_neg = false;
|
||||
@ -674,14 +674,14 @@ static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) {
|
||||
static bool set_string_flag(char* name, const char* value, Flag::Flags origin) {
|
||||
if (!CommandLineFlags::ccstrAtPut(name, &value, origin)) return false;
|
||||
// Contract: CommandLineFlags always returns a pointer that needs freeing.
|
||||
FREE_C_HEAP_ARRAY(char, value, mtInternal);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool append_to_string_flag(char* name, const char* new_value, FlagValueOrigin origin) {
|
||||
static bool append_to_string_flag(char* name, const char* new_value, Flag::Flags origin) {
|
||||
const char* old_value = "";
|
||||
if (!CommandLineFlags::ccstrAt(name, &old_value)) return false;
|
||||
size_t old_len = old_value != NULL ? strlen(old_value) : 0;
|
||||
@ -709,7 +709,7 @@ static bool append_to_string_flag(char* name, const char* new_value, FlagValueOr
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Arguments::parse_argument(const char* arg, FlagValueOrigin origin) {
|
||||
bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
|
||||
|
||||
// range of acceptable characters spelled out for portability reasons
|
||||
#define NAME_RANGE "[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_]"
|
||||
@ -850,7 +850,7 @@ void Arguments::print_jvm_args_on(outputStream* st) {
|
||||
}
|
||||
|
||||
bool Arguments::process_argument(const char* arg,
|
||||
jboolean ignore_unrecognized, FlagValueOrigin origin) {
|
||||
jboolean ignore_unrecognized, Flag::Flags origin) {
|
||||
|
||||
JDK_Version since = JDK_Version();
|
||||
|
||||
@ -904,7 +904,7 @@ bool Arguments::process_argument(const char* arg,
|
||||
jio_fprintf(defaultStream::error_stream(),
|
||||
"Did you mean '%s%s%s'?\n",
|
||||
(fuzzy_matched->is_bool()) ? "(+/-)" : "",
|
||||
fuzzy_matched->name,
|
||||
fuzzy_matched->_name,
|
||||
(fuzzy_matched->is_bool()) ? "" : "=<value>");
|
||||
}
|
||||
}
|
||||
@ -952,7 +952,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
|
||||
// this allows a way to include spaces in string-valued options
|
||||
token[pos] = '\0';
|
||||
logOption(token);
|
||||
result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
|
||||
result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
|
||||
build_jvm_flags(token);
|
||||
pos = 0;
|
||||
in_white_space = true;
|
||||
@ -970,7 +970,7 @@ bool Arguments::process_settings_file(const char* file_name, bool should_exist,
|
||||
}
|
||||
if (pos > 0) {
|
||||
token[pos] = '\0';
|
||||
result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
|
||||
result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
|
||||
build_jvm_flags(token);
|
||||
}
|
||||
fclose(stream);
|
||||
@ -1132,6 +1132,9 @@ void Arguments::set_tiered_flags() {
|
||||
Tier3InvokeNotifyFreqLog = 0;
|
||||
Tier4InvocationThreshold = 0;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
|
||||
FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
@ -2337,6 +2340,10 @@ bool Arguments::check_vm_args_consistency() {
|
||||
(2*G)/M);
|
||||
status = false;
|
||||
}
|
||||
|
||||
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
|
||||
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -2438,7 +2445,7 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
|
||||
}
|
||||
|
||||
// Parse JavaVMInitArgs structure passed in
|
||||
result = parse_each_vm_init_arg(args, &scp, &scp_assembly_required, COMMAND_LINE);
|
||||
result = parse_each_vm_init_arg(args, &scp, &scp_assembly_required, Flag::COMMAND_LINE);
|
||||
if (result != JNI_OK) {
|
||||
return result;
|
||||
}
|
||||
@ -2510,7 +2517,7 @@ bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
|
||||
jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
|
||||
SysClassPath* scp_p,
|
||||
bool* scp_assembly_required_p,
|
||||
FlagValueOrigin origin) {
|
||||
Flag::Flags origin) {
|
||||
// Remaining part of option string
|
||||
const char* tail;
|
||||
|
||||
@ -3333,7 +3340,7 @@ jint Arguments::parse_options_environment_variable(const char* name, SysClassPat
|
||||
}
|
||||
}
|
||||
|
||||
return(parse_each_vm_init_arg(&vm_args, scp_p, scp_assembly_required_p, ENVIRON_VAR));
|
||||
return(parse_each_vm_init_arg(&vm_args, scp_p, scp_assembly_required_p, Flag::ENVIRON_VAR));
|
||||
}
|
||||
return JNI_OK;
|
||||
}
|
||||
|
@ -360,15 +360,15 @@ class Arguments : AllStatic {
|
||||
|
||||
// Argument parsing
|
||||
static void do_pd_flag_adjustments();
|
||||
static bool parse_argument(const char* arg, FlagValueOrigin origin);
|
||||
static bool process_argument(const char* arg, jboolean ignore_unrecognized, FlagValueOrigin origin);
|
||||
static bool parse_argument(const char* arg, Flag::Flags origin);
|
||||
static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
|
||||
static void process_java_launcher_argument(const char*, void*);
|
||||
static void process_java_compiler_argument(char* arg);
|
||||
static jint parse_options_environment_variable(const char* name, SysClassPath* scp_p, bool* scp_assembly_required_p);
|
||||
static jint parse_java_tool_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
|
||||
static jint parse_java_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
|
||||
static jint parse_vm_init_args(const JavaVMInitArgs* args);
|
||||
static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, FlagValueOrigin origin);
|
||||
static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, Flag::Flags origin);
|
||||
static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required);
|
||||
static bool is_bad_option(const JavaVMOption* option, jboolean ignore,
|
||||
const char* option_type);
|
||||
|
@ -62,26 +62,174 @@ ARCH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, \
|
||||
MATERIALIZE_FLAGS_EXT
|
||||
|
||||
|
||||
void Flag::check_writable() {
|
||||
if (is_constant_in_binary()) {
|
||||
fatal(err_msg("flag is constant: %s", _name));
|
||||
}
|
||||
}
|
||||
|
||||
bool Flag::is_bool() const {
|
||||
return strcmp(_type, "bool") == 0;
|
||||
}
|
||||
|
||||
bool Flag::get_bool() const {
|
||||
return *((bool*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_bool(bool value) {
|
||||
check_writable();
|
||||
*((bool*) _addr) = value;
|
||||
}
|
||||
|
||||
bool Flag::is_intx() const {
|
||||
return strcmp(_type, "intx") == 0;
|
||||
}
|
||||
|
||||
intx Flag::get_intx() const {
|
||||
return *((intx*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_intx(intx value) {
|
||||
check_writable();
|
||||
*((intx*) _addr) = value;
|
||||
}
|
||||
|
||||
bool Flag::is_uintx() const {
|
||||
return strcmp(_type, "uintx") == 0;
|
||||
}
|
||||
|
||||
uintx Flag::get_uintx() const {
|
||||
return *((uintx*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_uintx(uintx value) {
|
||||
check_writable();
|
||||
*((uintx*) _addr) = value;
|
||||
}
|
||||
|
||||
bool Flag::is_uint64_t() const {
|
||||
return strcmp(_type, "uint64_t") == 0;
|
||||
}
|
||||
|
||||
uint64_t Flag::get_uint64_t() const {
|
||||
return *((uint64_t*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_uint64_t(uint64_t value) {
|
||||
check_writable();
|
||||
*((uint64_t*) _addr) = value;
|
||||
}
|
||||
|
||||
bool Flag::is_double() const {
|
||||
return strcmp(_type, "double") == 0;
|
||||
}
|
||||
|
||||
double Flag::get_double() const {
|
||||
return *((double*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_double(double value) {
|
||||
check_writable();
|
||||
*((double*) _addr) = value;
|
||||
}
|
||||
|
||||
bool Flag::is_ccstr() const {
|
||||
return strcmp(_type, "ccstr") == 0 || strcmp(_type, "ccstrlist") == 0;
|
||||
}
|
||||
|
||||
bool Flag::ccstr_accumulates() const {
|
||||
return strcmp(_type, "ccstrlist") == 0;
|
||||
}
|
||||
|
||||
ccstr Flag::get_ccstr() const {
|
||||
return *((ccstr*) _addr);
|
||||
}
|
||||
|
||||
void Flag::set_ccstr(ccstr value) {
|
||||
check_writable();
|
||||
*((ccstr*) _addr) = value;
|
||||
}
|
||||
|
||||
|
||||
Flag::Flags Flag::get_origin() {
|
||||
return Flags(_flags & VALUE_ORIGIN_MASK);
|
||||
}
|
||||
|
||||
void Flag::set_origin(Flags origin) {
|
||||
assert((origin & VALUE_ORIGIN_MASK) == origin, "sanity");
|
||||
_flags = Flags((_flags & ~VALUE_ORIGIN_MASK) | origin);
|
||||
}
|
||||
|
||||
bool Flag::is_default() {
|
||||
return (get_origin() == DEFAULT);
|
||||
}
|
||||
|
||||
bool Flag::is_ergonomic() {
|
||||
return (get_origin() == ERGONOMIC);
|
||||
}
|
||||
|
||||
bool Flag::is_command_line() {
|
||||
return (get_origin() == COMMAND_LINE);
|
||||
}
|
||||
|
||||
bool Flag::is_product() const {
|
||||
return (_flags & KIND_PRODUCT) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_manageable() const {
|
||||
return (_flags & KIND_MANAGEABLE) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_diagnostic() const {
|
||||
return (_flags & KIND_DIAGNOSTIC) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_experimental() const {
|
||||
return (_flags & KIND_EXPERIMENTAL) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_notproduct() const {
|
||||
return (_flags & KIND_NOT_PRODUCT) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_develop() const {
|
||||
return (_flags & KIND_DEVELOP) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_read_write() const {
|
||||
return (_flags & KIND_READ_WRITE) != 0;
|
||||
}
|
||||
|
||||
bool Flag::is_commercial() const {
|
||||
return (_flags & KIND_COMMERCIAL) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns if this flag is a constant in the binary. Right now this is
|
||||
* true for notproduct and develop flags in product builds.
|
||||
*/
|
||||
bool Flag::is_constant_in_binary() const {
|
||||
#ifdef PRODUCT
|
||||
return is_notproduct() || is_develop();
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool Flag::is_unlocker() const {
|
||||
return strcmp(name, "UnlockDiagnosticVMOptions") == 0 ||
|
||||
strcmp(name, "UnlockExperimentalVMOptions") == 0 ||
|
||||
return strcmp(_name, "UnlockDiagnosticVMOptions") == 0 ||
|
||||
strcmp(_name, "UnlockExperimentalVMOptions") == 0 ||
|
||||
is_unlocker_ext();
|
||||
}
|
||||
|
||||
bool Flag::is_unlocked() const {
|
||||
if (strcmp(kind, "{diagnostic}") == 0 ||
|
||||
strcmp(kind, "{C2 diagnostic}") == 0 ||
|
||||
strcmp(kind, "{ARCH diagnostic}") == 0 ||
|
||||
strcmp(kind, "{Shark diagnostic}") == 0) {
|
||||
if (is_diagnostic()) {
|
||||
return UnlockDiagnosticVMOptions;
|
||||
} else if (strcmp(kind, "{experimental}") == 0 ||
|
||||
strcmp(kind, "{C2 experimental}") == 0 ||
|
||||
strcmp(kind, "{ARCH experimental}") == 0 ||
|
||||
strcmp(kind, "{Shark experimental}") == 0) {
|
||||
return UnlockExperimentalVMOptions;
|
||||
} else {
|
||||
return is_unlocked_ext();
|
||||
}
|
||||
if (is_experimental()) {
|
||||
return UnlockExperimentalVMOptions;
|
||||
}
|
||||
return is_unlocked_ext();
|
||||
}
|
||||
|
||||
// Get custom message for this locked flag, or return NULL if
|
||||
@ -91,16 +239,14 @@ void Flag::get_locked_message(char* buf, int buflen) const {
|
||||
}
|
||||
|
||||
bool Flag::is_writeable() const {
|
||||
return strcmp(kind, "{manageable}") == 0 ||
|
||||
strcmp(kind, "{product rw}") == 0 ||
|
||||
is_writeable_ext();
|
||||
return is_manageable() || (is_product() && is_read_write()) || is_writeable_ext();
|
||||
}
|
||||
|
||||
// All flags except "manageable" are assumed to be internal flags.
|
||||
// Long term, we need to define a mechanism to specify which flags
|
||||
// are external/stable and change this function accordingly.
|
||||
bool Flag::is_external() const {
|
||||
return strcmp(kind, "{manageable}") == 0 || is_external_ext();
|
||||
return is_manageable() || is_external_ext();
|
||||
}
|
||||
|
||||
|
||||
@ -108,53 +254,113 @@ bool Flag::is_external() const {
|
||||
#define FORMAT_BUFFER_LEN 16
|
||||
|
||||
void Flag::print_on(outputStream* st, bool withComments) {
|
||||
st->print("%9s %-40s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
|
||||
if (is_bool()) st->print("%-16s", get_bool() ? "true" : "false");
|
||||
if (is_intx()) st->print("%-16ld", get_intx());
|
||||
if (is_uintx()) st->print("%-16lu", get_uintx());
|
||||
if (is_uint64_t()) st->print("%-16lu", get_uint64_t());
|
||||
if (is_double()) st->print("%-16f", get_double());
|
||||
|
||||
if (is_ccstr()) {
|
||||
const char* cp = get_ccstr();
|
||||
if (cp != NULL) {
|
||||
const char* eol;
|
||||
while ((eol = strchr(cp, '\n')) != NULL) {
|
||||
char format_buffer[FORMAT_BUFFER_LEN];
|
||||
size_t llen = pointer_delta(eol, cp, sizeof(char));
|
||||
jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
|
||||
"%%." SIZE_FORMAT "s", llen);
|
||||
st->print(format_buffer, cp);
|
||||
st->cr();
|
||||
cp = eol+1;
|
||||
st->print("%5s %-35s += ", "", name);
|
||||
}
|
||||
st->print("%-16s", cp);
|
||||
}
|
||||
else st->print("%-16s", "");
|
||||
// Don't print notproduct and develop flags in a product build.
|
||||
if (is_constant_in_binary()) {
|
||||
return;
|
||||
}
|
||||
st->print("%-20s", kind);
|
||||
|
||||
st->print("%9s %-40s %c= ", _type, _name, (!is_default() ? ':' : ' '));
|
||||
|
||||
if (is_bool()) {
|
||||
st->print("%-16s", get_bool() ? "true" : "false");
|
||||
}
|
||||
if (is_intx()) {
|
||||
st->print("%-16ld", get_intx());
|
||||
}
|
||||
if (is_uintx()) {
|
||||
st->print("%-16lu", get_uintx());
|
||||
}
|
||||
if (is_uint64_t()) {
|
||||
st->print("%-16lu", get_uint64_t());
|
||||
}
|
||||
if (is_double()) {
|
||||
st->print("%-16f", get_double());
|
||||
}
|
||||
if (is_ccstr()) {
|
||||
const char* cp = get_ccstr();
|
||||
if (cp != NULL) {
|
||||
const char* eol;
|
||||
while ((eol = strchr(cp, '\n')) != NULL) {
|
||||
char format_buffer[FORMAT_BUFFER_LEN];
|
||||
size_t llen = pointer_delta(eol, cp, sizeof(char));
|
||||
jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
|
||||
"%%." SIZE_FORMAT "s", llen);
|
||||
st->print(format_buffer, cp);
|
||||
st->cr();
|
||||
cp = eol+1;
|
||||
st->print("%5s %-35s += ", "", _name);
|
||||
}
|
||||
st->print("%-16s", cp);
|
||||
}
|
||||
else st->print("%-16s", "");
|
||||
}
|
||||
|
||||
st->print("%-20");
|
||||
print_kind(st);
|
||||
|
||||
if (withComments) {
|
||||
#ifndef PRODUCT
|
||||
st->print("%s", doc );
|
||||
st->print("%s", _doc);
|
||||
#endif
|
||||
}
|
||||
st->cr();
|
||||
}
|
||||
|
||||
void Flag::print_kind(outputStream* st) {
|
||||
struct Data {
|
||||
int flag;
|
||||
const char* name;
|
||||
};
|
||||
|
||||
Data data[] = {
|
||||
{ KIND_C1, "C1" },
|
||||
{ KIND_C2, "C2" },
|
||||
{ KIND_ARCH, "ARCH" },
|
||||
{ KIND_SHARK, "SHARK" },
|
||||
{ KIND_PLATFORM_DEPENDENT, "pd" },
|
||||
{ KIND_PRODUCT, "product" },
|
||||
{ KIND_MANAGEABLE, "manageable" },
|
||||
{ KIND_DIAGNOSTIC, "diagnostic" },
|
||||
{ KIND_NOT_PRODUCT, "notproduct" },
|
||||
{ KIND_DEVELOP, "develop" },
|
||||
{ KIND_LP64_PRODUCT, "lp64_product" },
|
||||
{ KIND_READ_WRITE, "rw" },
|
||||
{ -1, "" }
|
||||
};
|
||||
|
||||
if ((_flags & KIND_MASK) != 0) {
|
||||
st->print("{");
|
||||
bool is_first = true;
|
||||
|
||||
for (int i = 0; data[i].flag != -1; i++) {
|
||||
Data d = data[i];
|
||||
if ((_flags & d.flag) != 0) {
|
||||
if (is_first) {
|
||||
is_first = false;
|
||||
} else {
|
||||
st->print(" ");
|
||||
}
|
||||
st->print(d.name);
|
||||
}
|
||||
}
|
||||
|
||||
st->print("}");
|
||||
}
|
||||
}
|
||||
|
||||
void Flag::print_as_flag(outputStream* st) {
|
||||
if (is_bool()) {
|
||||
st->print("-XX:%s%s", get_bool() ? "+" : "-", name);
|
||||
st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
|
||||
} else if (is_intx()) {
|
||||
st->print("-XX:%s=" INTX_FORMAT, name, get_intx());
|
||||
st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
|
||||
} else if (is_uintx()) {
|
||||
st->print("-XX:%s=" UINTX_FORMAT, name, get_uintx());
|
||||
st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
|
||||
} else if (is_uint64_t()) {
|
||||
st->print("-XX:%s=" UINT64_FORMAT, name, get_uint64_t());
|
||||
st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
|
||||
} else if (is_double()) {
|
||||
st->print("-XX:%s=%f", name, get_double());
|
||||
st->print("-XX:%s=%f", _name, get_double());
|
||||
} else if (is_ccstr()) {
|
||||
st->print("-XX:%s=", name);
|
||||
st->print("-XX:%s=", _name);
|
||||
const char* cp = get_ccstr();
|
||||
if (cp != NULL) {
|
||||
// Need to turn embedded '\n's back into separate arguments
|
||||
@ -167,7 +373,7 @@ void Flag::print_as_flag(outputStream* st) {
|
||||
st->print("%c", *cp);
|
||||
break;
|
||||
case '\n':
|
||||
st->print(" -XX:%s=", name);
|
||||
st->print(" -XX:%s=", _name);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -180,79 +386,51 @@ void Flag::print_as_flag(outputStream* st) {
|
||||
// 4991491 do not "optimize out" the was_set false values: omitting them
|
||||
// tickles a Microsoft compiler bug causing flagTable to be malformed
|
||||
|
||||
#define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product}", DEFAULT },
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{pd product}", DEFAULT },
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{diagnostic}", DEFAULT },
|
||||
#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{experimental}", DEFAULT },
|
||||
#define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{manageable}", DEFAULT },
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product rw}", DEFAULT },
|
||||
#define NAME(name) NOT_PRODUCT(&name) PRODUCT_ONLY(&CONST_##name)
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
|
||||
#else
|
||||
#define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "", DEFAULT },
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, doc, "{pd}", DEFAULT },
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{notproduct}", DEFAULT },
|
||||
#endif
|
||||
#define RUNTIME_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT) },
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DIAGNOSTIC) },
|
||||
#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_EXPERIMENTAL) },
|
||||
#define RUNTIME_MANAGEABLE_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_MANAGEABLE) },
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_READ_WRITE) },
|
||||
#define RUNTIME_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP) },
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_NOT_PRODUCT) },
|
||||
|
||||
#ifdef _LP64
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{lp64_product}", DEFAULT },
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_LP64_PRODUCT) },
|
||||
#else
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#endif // _LP64
|
||||
|
||||
#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
|
||||
#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
|
||||
#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
|
||||
#ifdef PRODUCT
|
||||
#define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
|
||||
#define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
|
||||
#else
|
||||
#define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1}", DEFAULT },
|
||||
#define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, doc, "{C1 pd}", DEFAULT },
|
||||
#define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
|
||||
#endif
|
||||
#define C1_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT) },
|
||||
#define C1_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define C1_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DIAGNOSTIC) },
|
||||
#define C1_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP) },
|
||||
#define C1_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define C1_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_NOT_PRODUCT) },
|
||||
|
||||
#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
|
||||
#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
|
||||
#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
|
||||
#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 experimental}", DEFAULT },
|
||||
#ifdef PRODUCT
|
||||
#define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
|
||||
#define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
|
||||
#else
|
||||
#define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2}", DEFAULT },
|
||||
#define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, doc, "{C2 pd}", DEFAULT },
|
||||
#define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2 notproduct}", DEFAULT },
|
||||
#endif
|
||||
#define C2_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT) },
|
||||
#define C2_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define C2_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DIAGNOSTIC) },
|
||||
#define C2_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_EXPERIMENTAL) },
|
||||
#define C2_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP) },
|
||||
#define C2_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define C2_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_NOT_PRODUCT) },
|
||||
|
||||
#define ARCH_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH product}", DEFAULT },
|
||||
#define ARCH_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH diagnostic}", DEFAULT },
|
||||
#define ARCH_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH experimental}", DEFAULT },
|
||||
#ifdef PRODUCT
|
||||
#define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
|
||||
#else
|
||||
#define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH}", DEFAULT },
|
||||
#define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH notproduct}", DEFAULT },
|
||||
#endif
|
||||
#define ARCH_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_PRODUCT) },
|
||||
#define ARCH_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DIAGNOSTIC) },
|
||||
#define ARCH_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_EXPERIMENTAL) },
|
||||
#define ARCH_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DEVELOP) },
|
||||
#define ARCH_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_NOT_PRODUCT) },
|
||||
|
||||
#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark product}", DEFAULT },
|
||||
#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark pd product}", DEFAULT },
|
||||
#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark diagnostic}", DEFAULT },
|
||||
#ifdef PRODUCT
|
||||
#define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
|
||||
#define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
|
||||
#define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
|
||||
#else
|
||||
#define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark}", DEFAULT },
|
||||
#define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, doc, "{Shark pd}", DEFAULT },
|
||||
#define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark notproduct}", DEFAULT },
|
||||
#endif
|
||||
#define SHARK_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT) },
|
||||
#define SHARK_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define SHARK_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DIAGNOSTIC) },
|
||||
#define SHARK_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP) },
|
||||
#define SHARK_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
|
||||
#define SHARK_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_NOT_PRODUCT) },
|
||||
|
||||
static Flag flagTable[] = {
|
||||
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
|
||||
@ -285,9 +463,14 @@ inline bool str_equal(const char* s, const char* q, size_t len) {
|
||||
|
||||
// Search the flag table for a named flag
|
||||
Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
|
||||
for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
|
||||
if (str_equal(current->name, name, length)) {
|
||||
// Found a matching entry. Report locked flags only if allowed.
|
||||
for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
|
||||
if (str_equal(current->_name, name, length)) {
|
||||
// Found a matching entry.
|
||||
// Don't report notproduct and develop flags in product builds.
|
||||
if (current->is_constant_in_binary()) {
|
||||
return NULL;
|
||||
}
|
||||
// Report locked flags only if allowed.
|
||||
if (!(current->is_unlocked() || current->is_unlocker())) {
|
||||
if (!allow_locked) {
|
||||
// disable use of locked flags, e.g. diagnostic, experimental,
|
||||
@ -327,8 +510,8 @@ Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
|
||||
float score;
|
||||
float max_score = -1;
|
||||
|
||||
for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
|
||||
score = str_similar(current->name, name, length);
|
||||
for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
|
||||
score = str_similar(current->_name, name, length);
|
||||
if (score > max_score) {
|
||||
max_score = score;
|
||||
match = current;
|
||||
@ -357,25 +540,25 @@ static Flag* address_of_flag(CommandLineFlagWithType flag) {
|
||||
bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
|
||||
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
|
||||
Flag* f = &Flag::flags[flag];
|
||||
return (f->origin == DEFAULT);
|
||||
return f->is_default();
|
||||
}
|
||||
|
||||
bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
|
||||
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
|
||||
Flag* f = &Flag::flags[flag];
|
||||
return (f->origin == ERGONOMIC);
|
||||
return f->is_ergonomic();
|
||||
}
|
||||
|
||||
bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
|
||||
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
|
||||
Flag* f = &Flag::flags[flag];
|
||||
return (f->origin == COMMAND_LINE);
|
||||
return f->is_command_line();
|
||||
}
|
||||
|
||||
bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
|
||||
Flag* result = Flag::find_flag((char*)name, strlen(name));
|
||||
if (result == NULL) return false;
|
||||
*value = (result->origin == COMMAND_LINE);
|
||||
*value = result->is_command_line();
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -387,22 +570,22 @@ bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_bool()) return false;
|
||||
bool old_value = result->get_bool();
|
||||
result->set_bool(*value);
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
|
||||
faddr->set_bool(value);
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
|
||||
@ -413,22 +596,22 @@ bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_intx()) return false;
|
||||
intx old_value = result->get_intx();
|
||||
result->set_intx(*value);
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
|
||||
faddr->set_intx(value);
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) {
|
||||
@ -439,22 +622,22 @@ bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_uintx()) return false;
|
||||
uintx old_value = result->get_uintx();
|
||||
result->set_uintx(*value);
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
|
||||
faddr->set_uintx(value);
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
|
||||
@ -465,22 +648,22 @@ bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_uint64_t()) return false;
|
||||
uint64_t old_value = result->get_uint64_t();
|
||||
result->set_uint64_t(*value);
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
|
||||
faddr->set_uint64_t(value);
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
|
||||
@ -491,22 +674,22 @@ bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_double()) return false;
|
||||
double old_value = result->get_double();
|
||||
result->set_double(*value);
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
|
||||
faddr->set_double(value);
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) {
|
||||
@ -519,7 +702,7 @@ bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) {
|
||||
|
||||
// Contract: Flag will make private copy of the incoming value.
|
||||
// Outgoing value is always malloc-ed, and caller MUST call free.
|
||||
bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin) {
|
||||
bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin) {
|
||||
Flag* result = Flag::find_flag(name, len);
|
||||
if (result == NULL) return false;
|
||||
if (!result->is_ccstr()) return false;
|
||||
@ -530,35 +713,35 @@ bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, FlagValu
|
||||
strcpy(new_value, *value);
|
||||
}
|
||||
result->set_ccstr(new_value);
|
||||
if (result->origin == DEFAULT && old_value != NULL) {
|
||||
if (result->is_default() && old_value != NULL) {
|
||||
// Prior value is NOT heap allocated, but was a literal constant.
|
||||
char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
|
||||
strcpy(old_value_to_free, old_value);
|
||||
old_value = old_value_to_free;
|
||||
}
|
||||
*value = old_value;
|
||||
result->origin = origin;
|
||||
result->set_origin(origin);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Contract: Flag will make private copy of the incoming value.
|
||||
void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin) {
|
||||
void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
|
||||
Flag* faddr = address_of_flag(flag);
|
||||
guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
|
||||
ccstr old_value = faddr->get_ccstr();
|
||||
char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
|
||||
strcpy(new_value, value);
|
||||
faddr->set_ccstr(new_value);
|
||||
if (faddr->origin != DEFAULT && old_value != NULL) {
|
||||
if (!faddr->is_default() && old_value != NULL) {
|
||||
// Prior value is heap allocated so free it.
|
||||
FREE_C_HEAP_ARRAY(char, old_value, mtInternal);
|
||||
}
|
||||
faddr->origin = origin;
|
||||
faddr->set_origin(origin);
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
static int compare_flags(const void* void_a, const void* void_b) {
|
||||
return strcmp((*((Flag**) void_a))->name, (*((Flag**) void_b))->name);
|
||||
return strcmp((*((Flag**) void_a))->_name, (*((Flag**) void_b))->_name);
|
||||
}
|
||||
}
|
||||
|
||||
@ -567,20 +750,19 @@ void CommandLineFlags::printSetFlags(outputStream* out) {
|
||||
// note: this method is called before the thread structure is in place
|
||||
// which means resource allocation cannot be used.
|
||||
|
||||
// Compute size
|
||||
int length= 0;
|
||||
while (flagTable[length].name != NULL) length++;
|
||||
// The last entry is the null entry.
|
||||
const size_t length = Flag::numFlags - 1;
|
||||
|
||||
// Sort
|
||||
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
|
||||
for (int index = 0; index < length; index++) {
|
||||
array[index] = &flagTable[index];
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
array[i] = &flagTable[i];
|
||||
}
|
||||
qsort(array, length, sizeof(Flag*), compare_flags);
|
||||
|
||||
// Print
|
||||
for (int i = 0; i < length; i++) {
|
||||
if (array[i]->origin /* naked field! */) {
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
if (array[i]->get_origin() /* naked field! */) {
|
||||
array[i]->print_as_flag(out);
|
||||
out->print(" ");
|
||||
}
|
||||
@ -603,20 +785,19 @@ void CommandLineFlags::printFlags(outputStream* out, bool withComments) {
|
||||
// note: this method is called before the thread structure is in place
|
||||
// which means resource allocation cannot be used.
|
||||
|
||||
// Compute size
|
||||
int length= 0;
|
||||
while (flagTable[length].name != NULL) length++;
|
||||
// The last entry is the null entry.
|
||||
const size_t length = Flag::numFlags - 1;
|
||||
|
||||
// Sort
|
||||
Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
|
||||
for (int index = 0; index < length; index++) {
|
||||
array[index] = &flagTable[index];
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
array[i] = &flagTable[i];
|
||||
}
|
||||
qsort(array, length, sizeof(Flag*), compare_flags);
|
||||
|
||||
// Print
|
||||
out->print_cr("[Global flags]");
|
||||
for (int i = 0; i < length; i++) {
|
||||
for (size_t i = 0; i < length; i++) {
|
||||
if (array[i]->is_unlocked()) {
|
||||
array[i]->print_on(out, withComments);
|
||||
}
|
||||
|
@ -194,29 +194,49 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G);
|
||||
typedef const char* ccstr;
|
||||
typedef const char* ccstrlist; // represents string arguments which accumulate
|
||||
|
||||
enum FlagValueOrigin {
|
||||
DEFAULT = 0,
|
||||
COMMAND_LINE = 1,
|
||||
ENVIRON_VAR = 2,
|
||||
CONFIG_FILE = 3,
|
||||
MANAGEMENT = 4,
|
||||
ERGONOMIC = 5,
|
||||
ATTACH_ON_DEMAND = 6,
|
||||
INTERNAL = 99
|
||||
};
|
||||
|
||||
struct Flag {
|
||||
const char *type;
|
||||
const char *name;
|
||||
void* addr;
|
||||
enum Flags {
|
||||
// value origin
|
||||
DEFAULT = 0,
|
||||
COMMAND_LINE = 1,
|
||||
ENVIRON_VAR = 2,
|
||||
CONFIG_FILE = 3,
|
||||
MANAGEMENT = 4,
|
||||
ERGONOMIC = 5,
|
||||
ATTACH_ON_DEMAND = 6,
|
||||
INTERNAL = 7,
|
||||
|
||||
NOT_PRODUCT(const char *doc;)
|
||||
LAST_VALUE_ORIGIN = INTERNAL,
|
||||
VALUE_ORIGIN_BITS = 4,
|
||||
VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS),
|
||||
|
||||
const char *kind;
|
||||
FlagValueOrigin origin;
|
||||
// flag kind
|
||||
KIND_PRODUCT = 1 << 4,
|
||||
KIND_MANAGEABLE = 1 << 5,
|
||||
KIND_DIAGNOSTIC = 1 << 6,
|
||||
KIND_EXPERIMENTAL = 1 << 7,
|
||||
KIND_NOT_PRODUCT = 1 << 8,
|
||||
KIND_DEVELOP = 1 << 9,
|
||||
KIND_PLATFORM_DEPENDENT = 1 << 10,
|
||||
KIND_READ_WRITE = 1 << 11,
|
||||
KIND_C1 = 1 << 12,
|
||||
KIND_C2 = 1 << 13,
|
||||
KIND_ARCH = 1 << 14,
|
||||
KIND_SHARK = 1 << 15,
|
||||
KIND_LP64_PRODUCT = 1 << 16,
|
||||
KIND_COMMERCIAL = 1 << 17,
|
||||
|
||||
KIND_MASK = ~VALUE_ORIGIN_MASK
|
||||
};
|
||||
|
||||
const char* _type;
|
||||
const char* _name;
|
||||
void* _addr;
|
||||
NOT_PRODUCT(const char* _doc;)
|
||||
Flags _flags;
|
||||
|
||||
// points to all Flags static array
|
||||
static Flag *flags;
|
||||
static Flag* flags;
|
||||
|
||||
// number of flags
|
||||
static size_t numFlags;
|
||||
@ -224,30 +244,50 @@ struct Flag {
|
||||
static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
|
||||
static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
|
||||
|
||||
bool is_bool() const { return strcmp(type, "bool") == 0; }
|
||||
bool get_bool() const { return *((bool*) addr); }
|
||||
void set_bool(bool value) { *((bool*) addr) = value; }
|
||||
void check_writable();
|
||||
|
||||
bool is_intx() const { return strcmp(type, "intx") == 0; }
|
||||
intx get_intx() const { return *((intx*) addr); }
|
||||
void set_intx(intx value) { *((intx*) addr) = value; }
|
||||
bool is_bool() const;
|
||||
bool get_bool() const;
|
||||
void set_bool(bool value);
|
||||
|
||||
bool is_uintx() const { return strcmp(type, "uintx") == 0; }
|
||||
uintx get_uintx() const { return *((uintx*) addr); }
|
||||
void set_uintx(uintx value) { *((uintx*) addr) = value; }
|
||||
bool is_intx() const;
|
||||
intx get_intx() const;
|
||||
void set_intx(intx value);
|
||||
|
||||
bool is_uint64_t() const { return strcmp(type, "uint64_t") == 0; }
|
||||
uint64_t get_uint64_t() const { return *((uint64_t*) addr); }
|
||||
void set_uint64_t(uint64_t value) { *((uint64_t*) addr) = value; }
|
||||
bool is_uintx() const;
|
||||
uintx get_uintx() const;
|
||||
void set_uintx(uintx value);
|
||||
|
||||
bool is_double() const { return strcmp(type, "double") == 0; }
|
||||
double get_double() const { return *((double*) addr); }
|
||||
void set_double(double value) { *((double*) addr) = value; }
|
||||
bool is_uint64_t() const;
|
||||
uint64_t get_uint64_t() const;
|
||||
void set_uint64_t(uint64_t value);
|
||||
|
||||
bool is_ccstr() const { return strcmp(type, "ccstr") == 0 || strcmp(type, "ccstrlist") == 0; }
|
||||
bool ccstr_accumulates() const { return strcmp(type, "ccstrlist") == 0; }
|
||||
ccstr get_ccstr() const { return *((ccstr*) addr); }
|
||||
void set_ccstr(ccstr value) { *((ccstr*) addr) = value; }
|
||||
bool is_double() const;
|
||||
double get_double() const;
|
||||
void set_double(double value);
|
||||
|
||||
bool is_ccstr() const;
|
||||
bool ccstr_accumulates() const;
|
||||
ccstr get_ccstr() const;
|
||||
void set_ccstr(ccstr value);
|
||||
|
||||
Flags get_origin();
|
||||
void set_origin(Flags origin);
|
||||
|
||||
bool is_default();
|
||||
bool is_ergonomic();
|
||||
bool is_command_line();
|
||||
|
||||
bool is_product() const;
|
||||
bool is_manageable() const;
|
||||
bool is_diagnostic() const;
|
||||
bool is_experimental() const;
|
||||
bool is_notproduct() const;
|
||||
bool is_develop() const;
|
||||
bool is_read_write() const;
|
||||
bool is_commercial() const;
|
||||
|
||||
bool is_constant_in_binary() const;
|
||||
|
||||
bool is_unlocker() const;
|
||||
bool is_unlocked() const;
|
||||
@ -263,6 +303,7 @@ struct Flag {
|
||||
void get_locked_message_ext(char*, int) const;
|
||||
|
||||
void print_on(outputStream* st, bool withComments = false );
|
||||
void print_kind(outputStream* st);
|
||||
void print_as_flag(outputStream* st);
|
||||
};
|
||||
|
||||
@ -310,33 +351,33 @@ class CommandLineFlags {
|
||||
public:
|
||||
static bool boolAt(char* name, size_t len, bool* value);
|
||||
static bool boolAt(char* name, bool* value) { return boolAt(name, strlen(name), value); }
|
||||
static bool boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin);
|
||||
static bool boolAtPut(char* name, bool* value, FlagValueOrigin origin) { return boolAtPut(name, strlen(name), value, origin); }
|
||||
static bool boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin);
|
||||
static bool boolAtPut(char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
static bool intxAt(char* name, size_t len, intx* value);
|
||||
static bool intxAt(char* name, intx* value) { return intxAt(name, strlen(name), value); }
|
||||
static bool intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin);
|
||||
static bool intxAtPut(char* name, intx* value, FlagValueOrigin origin) { return intxAtPut(name, strlen(name), value, origin); }
|
||||
static bool intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin);
|
||||
static bool intxAtPut(char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
static bool uintxAt(char* name, size_t len, uintx* value);
|
||||
static bool uintxAt(char* name, uintx* value) { return uintxAt(name, strlen(name), value); }
|
||||
static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin);
|
||||
static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); }
|
||||
static bool uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin);
|
||||
static bool uintxAtPut(char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
static bool uint64_tAt(char* name, size_t len, uint64_t* value);
|
||||
static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
|
||||
static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin);
|
||||
static bool uint64_tAtPut(char* name, uint64_t* value, FlagValueOrigin origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
|
||||
static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin);
|
||||
static bool uint64_tAtPut(char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
static bool doubleAt(char* name, size_t len, double* value);
|
||||
static bool doubleAt(char* name, double* value) { return doubleAt(name, strlen(name), value); }
|
||||
static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin);
|
||||
static bool doubleAtPut(char* name, double* value, FlagValueOrigin origin) { return doubleAtPut(name, strlen(name), value, origin); }
|
||||
static bool doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin);
|
||||
static bool doubleAtPut(char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
static bool ccstrAt(char* name, size_t len, ccstr* value);
|
||||
static bool ccstrAt(char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); }
|
||||
static bool ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin);
|
||||
static bool ccstrAtPut(char* name, ccstr* value, FlagValueOrigin origin) { return ccstrAtPut(name, strlen(name), value, origin); }
|
||||
static bool ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin);
|
||||
static bool ccstrAtPut(char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
|
||||
|
||||
// Returns false if name is not a command line flag.
|
||||
static bool wasSetOnCmdline(const char* name, bool* value);
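// Editor's note: illustrative only, not part of this changeset. A minimal sketch of how
// the lookup/update pattern declared above is typically driven; "UseCodeCacheFlushing" is
// just an existing bool flag chosen as an example, and error handling is reduced to the
// boolean return value (false when the name does not match a command line flag).
static void example_toggle_flag() {
  bool value;
  if (CommandLineFlags::boolAt((char*)"UseCodeCacheFlushing", &value)) {
    bool new_value = !value;
    // Record that the new value originates from ergonomics rather than the command line.
    CommandLineFlags::boolAtPut((char*)"UseCodeCacheFlushing", &new_value, Flag::ERGONOMIC);
  }
}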
|
||||
@ -2830,6 +2871,10 @@ class CommandLineFlags {
|
||||
product(intx, NmethodSweepCheckInterval, 5, \
|
||||
"Compilers wake up every n seconds to possibly sweep nmethods") \
|
||||
\
|
||||
product(intx, NmethodSweepActivity, 10, \
|
||||
"Removes cold nmethods from code cache if > 0. Higher values " \
|
||||
"result in more aggressive sweeping") \
|
||||
\
|
||||
notproduct(bool, LogSweeper, false, \
|
||||
"Keep a ring buffer of sweeper activity") \
|
||||
\
|
||||
@ -3201,15 +3246,6 @@ class CommandLineFlags {
|
||||
product(bool, UseCodeCacheFlushing, true, \
|
||||
"Attempt to clean the code cache before shutting off compiler") \
|
||||
\
|
||||
product(intx, MinCodeCacheFlushingInterval, 30, \
|
||||
"Min number of seconds between code cache cleaning sessions") \
|
||||
\
|
||||
product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \
|
||||
"When less than X space left, start code cache cleaning") \
|
||||
\
|
||||
product(uintx, CodeCacheFlushingFraction, 2, \
|
||||
"Fraction of the code cache that is flushed when full") \
|
||||
\
|
||||
/* interpreter debugging */ \
|
||||
develop(intx, BinarySwitchThreshold, 5, \
|
||||
"Minimal number of lookupswitch entries for rewriting to binary " \
|
||||
@ -3730,20 +3766,20 @@ class CommandLineFlags {
|
||||
*/
|
||||
|
||||
// Interface macros
|
||||
#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name;
|
||||
#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name;
|
||||
#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#ifdef PRODUCT
|
||||
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value;
|
||||
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name;
|
||||
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)
|
||||
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type CONST_##name; const type name = value;
|
||||
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type CONST_##name; const type name = pd_##name;
|
||||
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type CONST_##name;
|
||||
#else
|
||||
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
|
||||
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
|
||||
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
|
||||
#endif
|
||||
// Special LP64 flags, product only needed for now.
|
||||
#ifdef _LP64
|
||||
@ -3753,23 +3789,23 @@ class CommandLineFlags {
|
||||
#endif // _LP64
|
||||
|
||||
// Implementation macros
|
||||
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
|
||||
#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
|
||||
#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
|
||||
#ifdef PRODUCT
|
||||
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */
|
||||
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) /* flag name is constant */
|
||||
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
|
||||
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type CONST_##name = value;
|
||||
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type CONST_##name = pd_##name;
|
||||
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type CONST_##name = value;
|
||||
#else
|
||||
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
|
||||
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
|
||||
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#endif
|
||||
#ifdef _LP64
|
||||
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
|
||||
#else
|
||||
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
|
||||
#endif // _LP64
|
||||
|
@ -34,64 +34,42 @@
|
||||
// Parens left off in the following for the enum decl below.
|
||||
#define FLAG_MEMBER(flag) Flag_##flag
|
||||
|
||||
#define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#ifdef PRODUCT
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
|
||||
#else
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#endif
|
||||
#define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
|
||||
#ifdef _LP64
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#else
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#endif // _LP64
|
||||
|
||||
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#ifdef PRODUCT
|
||||
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
|
||||
#else
|
||||
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#endif
|
||||
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
|
||||
#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#ifdef PRODUCT
|
||||
#define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
|
||||
#else
|
||||
#define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#endif
|
||||
#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
|
||||
#define ARCH_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define ARCH_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define ARCH_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#ifdef PRODUCT
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
|
||||
#else
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#endif
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
|
||||
|
||||
typedef enum {
|
||||
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
|
||||
@ -114,64 +92,42 @@ typedef enum {
|
||||
|
||||
#define FLAG_MEMBER_WITH_TYPE(flag,type) Flag_##flag##_##type
|
||||
|
||||
#define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#ifdef PRODUCT
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
|
||||
#else
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#endif
|
||||
#define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
|
||||
#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
|
||||
#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#ifdef PRODUCT
|
||||
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
|
||||
#else
|
||||
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#endif
|
||||
#ifdef _LP64
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#else
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#endif // _LP64
|
||||
|
||||
#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#ifdef PRODUCT
|
||||
#define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
|
||||
#else
|
||||
#define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#endif
|
||||
#define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
|
||||
#define ARCH_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define ARCH_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#ifdef PRODUCT
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
|
||||
#else
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#endif
|
||||
#define ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
#define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
|
||||
|
||||
typedef enum {
|
||||
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE,
|
||||
@ -233,19 +189,19 @@ typedef enum {
|
||||
|
||||
#define FLAG_SET_DEFAULT(name, value) ((name) = (value))
|
||||
|
||||
#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), COMMAND_LINE))
|
||||
#define FLAG_SET_ERGO(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), ERGONOMIC))
|
||||
#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::COMMAND_LINE))
|
||||
#define FLAG_SET_ERGO(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::ERGONOMIC))
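// Editor's note: illustrative expansion, not part of this changeset. Combined with
//   #define FLAG_MEMBER_WITH_TYPE(flag,type) Flag_##flag##_##type
// a call such as
//   FLAG_SET_ERGO(intx, NmethodSweepActivity, 20)
// now expands to
//   (CommandLineFlagsEx::intxAtPut(Flag_NmethodSweepActivity_intx, (intx)(20), Flag::ERGONOMIC))
// i.e. the per-flag enum member is used for the lookup and the origin is passed as a
// Flag::Flags value instead of the old FlagValueOrigin enum.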
|
||||
|
||||
// Can't put the following in CommandLineFlags because
|
||||
// of a circular dependency on the enum definition.
|
||||
class CommandLineFlagsEx : CommandLineFlags {
|
||||
public:
|
||||
static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
|
||||
static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
|
||||
static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
|
||||
static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin);
|
||||
static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
|
||||
static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
|
||||
static void boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin);
|
||||
static void intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
|
||||
static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
|
||||
static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
|
||||
static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
|
||||
static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
|
||||
|
||||
static bool is_default(CommandLineFlag flag);
|
||||
static bool is_ergo(CommandLineFlag flag);
|
||||
|
@ -519,8 +519,8 @@ void SafepointSynchronize::do_cleanup_tasks() {
|
||||
}
|
||||
|
||||
{
|
||||
TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
|
||||
NMethodSweeper::scan_stacks();
|
||||
TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
|
||||
NMethodSweeper::mark_active_nmethods();
|
||||
}
|
||||
|
||||
if (SymbolTable::needs_rehashing()) {
|
||||
|
@ -127,64 +127,79 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
|
||||
#define SWEEP(nm)
|
||||
#endif
|
||||
|
||||
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
|
||||
long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
|
||||
int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
|
||||
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
|
||||
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
|
||||
int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
|
||||
|
||||
long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
|
||||
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
|
||||
int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
|
||||
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
|
||||
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
|
||||
int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
|
||||
|
||||
volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
|
||||
volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass
|
||||
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
|
||||
|
||||
jint NMethodSweeper::_locked_seen = 0;
|
||||
jint NMethodSweeper::_locked_seen = 0;
|
||||
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
|
||||
bool NMethodSweeper::_resweep = false;
|
||||
jint NMethodSweeper::_flush_token = 0;
|
||||
jlong NMethodSweeper::_last_full_flush_time = 0;
|
||||
int NMethodSweeper::_highest_marked = 0;
|
||||
int NMethodSweeper::_dead_compile_ids = 0;
|
||||
long NMethodSweeper::_last_flush_traversal_id = 0;
|
||||
bool NMethodSweeper::_request_mark_phase = false;
|
||||
|
||||
int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
|
||||
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
|
||||
jlong NMethodSweeper::_total_time_sweeping = 0;
|
||||
jlong NMethodSweeper::_total_time_this_sweep = 0;
|
||||
jlong NMethodSweeper::_peak_sweep_time = 0;
|
||||
jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
|
||||
jlong NMethodSweeper::_total_disconnect_time = 0;
|
||||
jlong NMethodSweeper::_peak_disconnect_time = 0;
|
||||
jlong NMethodSweeper::_total_time_sweeping = 0;
|
||||
jlong NMethodSweeper::_total_time_this_sweep = 0;
|
||||
jlong NMethodSweeper::_peak_sweep_time = 0;
|
||||
jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
|
||||
int NMethodSweeper::_hotness_counter_reset_val = 0;
|
||||
|
||||
|
||||
class MarkActivationClosure: public CodeBlobClosure {
|
||||
public:
|
||||
virtual void do_code_blob(CodeBlob* cb) {
|
||||
// If we see an activation belonging to a non_entrant nmethod, we mark it.
|
||||
if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
|
||||
((nmethod*)cb)->mark_as_seen_on_stack();
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
|
||||
// If we see an activation belonging to a non_entrant nmethod, we mark it.
|
||||
if (nm->is_not_entrant()) {
|
||||
nm->mark_as_seen_on_stack();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
static MarkActivationClosure mark_activation_closure;
|
||||
|
||||
class SetHotnessClosure: public CodeBlobClosure {
|
||||
public:
|
||||
virtual void do_code_blob(CodeBlob* cb) {
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
|
||||
}
|
||||
}
|
||||
};
|
||||
static SetHotnessClosure set_hotness_closure;
|
||||
|
||||
|
||||
int NMethodSweeper::hotness_counter_reset_val() {
|
||||
if (_hotness_counter_reset_val == 0) {
|
||||
_hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
|
||||
}
|
||||
return _hotness_counter_reset_val;
|
||||
}
|
||||
bool NMethodSweeper::sweep_in_progress() {
|
||||
return (_current != NULL);
|
||||
}
|
||||
|
||||
void NMethodSweeper::scan_stacks() {
|
||||
// Scans the stacks of all Java threads and marks activations of not-entrant methods.
|
||||
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
|
||||
// safepoint.
|
||||
void NMethodSweeper::mark_active_nmethods() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
|
||||
if (!MethodFlushing) return;
|
||||
|
||||
// No need to synchronize access, since this is always executed at a
|
||||
// safepoint.
|
||||
|
||||
// Make sure CompiledIC_lock is unlocked, since we might update some
|
||||
// inline caches. If it is, we just bail-out and try later.
|
||||
if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
|
||||
// If we do not want to reclaim not-entrant or zombie methods there is no need
|
||||
// to scan stacks
|
||||
if (!MethodFlushing) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Check for restart
|
||||
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
|
||||
if (!sweep_in_progress() && _resweep) {
|
||||
if (!sweep_in_progress() && need_marking_phase()) {
|
||||
_seen = 0;
|
||||
_invocations = NmethodSweepFraction;
|
||||
_current = CodeCache::first_nmethod();
|
||||
@ -197,30 +212,22 @@ void NMethodSweeper::scan_stacks() {
|
||||
Threads::nmethods_do(&mark_activation_closure);
|
||||
|
||||
// reset the flags since we started a scan from the beginning.
|
||||
_resweep = false;
|
||||
reset_nmethod_marking();
|
||||
_locked_seen = 0;
|
||||
_not_entrant_seen_on_stack = 0;
|
||||
} else {
|
||||
// Only set hotness counter
|
||||
Threads::nmethods_do(&set_hotness_closure);
|
||||
}
|
||||
|
||||
if (UseCodeCacheFlushing) {
|
||||
// only allow new flushes after the interval is complete.
|
||||
jlong now = os::javaTimeMillis();
|
||||
jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
|
||||
jlong curr_interval = now - _last_full_flush_time;
|
||||
if (curr_interval > max_interval) {
|
||||
_flush_token = 0;
|
||||
}
|
||||
|
||||
if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
|
||||
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
|
||||
log_sweep("restart_compiler");
|
||||
}
|
||||
}
|
||||
OrderAccess::storestore();
|
||||
}
|
||||
|
||||
void NMethodSweeper::possibly_sweep() {
|
||||
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
|
||||
if (!MethodFlushing || !sweep_in_progress()) return;
|
||||
if (!MethodFlushing || !sweep_in_progress()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (_invocations > 0) {
|
||||
// Only one thread at a time will sweep
|
||||
@ -258,8 +265,7 @@ void NMethodSweeper::sweep_code_cache() {
|
||||
if (!CompileBroker::should_compile_new_jobs()) {
|
||||
// If we have turned off compilations we might as well do full sweeps
|
||||
// in order to reach the clean state faster. Otherwise the sleeping compiler
|
||||
// threads will slow down sweeping. After a few iterations the cache
|
||||
// will be clean and sweeping stops (_resweep will not be set)
|
||||
// threads will slow down sweeping.
|
||||
_invocations = 1;
|
||||
}
|
||||
|
||||
@ -271,9 +277,11 @@ void NMethodSweeper::sweep_code_cache() {
|
||||
int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
|
||||
int swept_count = 0;
|
||||
|
||||
|
||||
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
|
||||
assert(!CodeCache_lock->owned_by_self(), "just checking");
|
||||
|
||||
int freed_memory = 0;
|
||||
{
|
||||
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
|
||||
@ -299,7 +307,7 @@ void NMethodSweeper::sweep_code_cache() {
|
||||
// Now ready to process nmethod and give up CodeCache_lock
|
||||
{
|
||||
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
process_nmethod(_current);
|
||||
freed_memory += process_nmethod(_current);
|
||||
}
|
||||
_seen++;
|
||||
_current = next;
|
||||
@ -308,11 +316,11 @@ void NMethodSweeper::sweep_code_cache() {
|
||||
|
||||
assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
|
||||
|
||||
if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
|
||||
if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
|
||||
// we've completed a scan without making progress but there were
|
||||
// nmethods we were unable to process either because they were
|
||||
// locked or were still on stack. We don't have to aggresively
|
||||
// clean them up so just stop scanning. We could scan once more
|
||||
// locked or were still on stack. We don't have to aggressively
|
||||
// clean them up so just stop scanning. We could scan once more
|
||||
// but that complicates the control logic and it's unlikely to
|
||||
// matter much.
|
||||
if (PrintMethodFlushing) {
|
||||
@ -351,9 +359,16 @@ void NMethodSweeper::sweep_code_cache() {
|
||||
log_sweep("finished");
|
||||
}
|
||||
|
||||
// Sweeper is the only case where memory is released,
|
||||
// check here if it is time to restart the compiler.
|
||||
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
|
||||
// Sweeper is the only case where memory is released, check here if it
|
||||
// is time to restart the compiler. Only checking if there is a certain
|
||||
// amount of free memory in the code cache might lead to re-enabling
|
||||
// compilation although no memory has been released. For example, there are
|
||||
// cases when compilation was disabled although there is 4MB (or more) free
|
||||
// memory in the code cache. The reason is code cache fragmentation. Therefore,
|
||||
// it only makes sense to re-enable compilation if we have actually freed memory.
|
||||
// Note that typically several kB are released for sweeping 16MB of the code
|
||||
// cache. As a result, 'freed_memory' > 0 is sufficient to restart the compiler.
|
||||
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
|
||||
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
|
||||
log_sweep("restart_compiler");
|
||||
}
|
||||
@ -367,8 +382,8 @@ class NMethodMarker: public StackObj {
|
||||
_thread = CompilerThread::current();
|
||||
if (!nm->is_zombie() && !nm->is_unloaded()) {
|
||||
// Only expose live nmethods for scanning
|
||||
_thread->set_scanned_nmethod(nm);
|
||||
}
|
||||
_thread->set_scanned_nmethod(nm);
|
||||
}
|
||||
}
|
||||
~NMethodMarker() {
|
||||
_thread->set_scanned_nmethod(NULL);
|
||||
@ -392,20 +407,20 @@ void NMethodSweeper::release_nmethod(nmethod *nm) {
|
||||
nm->flush();
|
||||
}
|
||||
|
||||
void NMethodSweeper::process_nmethod(nmethod *nm) {
|
||||
int NMethodSweeper::process_nmethod(nmethod *nm) {
|
||||
assert(!CodeCache_lock->owned_by_self(), "just checking");
|
||||
|
||||
int freed_memory = 0;
|
||||
// Make sure this nmethod doesn't get unloaded during the scan,
|
||||
// since the locks acquired below might safepoint.
|
||||
// since safepoints may happen while the locks acquired below are held.
|
||||
NMethodMarker nmm(nm);
|
||||
|
||||
SWEEP(nm);
|
||||
|
||||
// Skip methods that are currently referenced by the VM
|
||||
if (nm->is_locked_by_vm()) {
|
||||
// But still remember to clean-up inline caches for alive nmethods
|
||||
if (nm->is_alive()) {
|
||||
// Clean-up all inline caches that points to zombie/non-reentrant methods
|
||||
// Clean inline caches that point to zombie/non-entrant methods
|
||||
MutexLocker cl(CompiledIC_lock);
|
||||
nm->cleanup_inline_caches();
|
||||
SWEEP(nm);
|
||||
@ -413,18 +428,19 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
|
||||
_locked_seen++;
|
||||
SWEEP(nm);
|
||||
}
|
||||
return;
|
||||
return freed_memory;
|
||||
}
|
||||
|
||||
if (nm->is_zombie()) {
|
||||
// If it is first time, we see nmethod then we mark it. Otherwise,
|
||||
// we reclame it. When we have seen a zombie method twice, we know that
|
||||
// If it is the first time we see this nmethod then we mark it. Otherwise,
|
||||
// we reclaim it. When we have seen a zombie method twice, we know that
|
||||
// there are no inline caches that refer to it.
|
||||
if (nm->is_marked_for_reclamation()) {
|
||||
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
|
||||
if (PrintMethodFlushing && Verbose) {
|
||||
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
|
||||
}
|
||||
freed_memory = nm->total_size();
|
||||
release_nmethod(nm);
|
||||
_flushed_count++;
|
||||
} else {
|
||||
@ -432,19 +448,19 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
|
||||
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
|
||||
}
|
||||
nm->mark_for_reclamation();
|
||||
_resweep = true;
|
||||
request_nmethod_marking();
|
||||
_marked_count++;
|
||||
SWEEP(nm);
|
||||
}
|
||||
} else if (nm->is_not_entrant()) {
|
||||
// If there is no current activations of this method on the
|
||||
// If there are no current activations of this method on the
|
||||
// stack we can safely convert it to a zombie method
|
||||
if (nm->can_not_entrant_be_converted()) {
|
||||
if (PrintMethodFlushing && Verbose) {
|
||||
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
|
||||
}
|
||||
nm->make_zombie();
|
||||
_resweep = true;
|
||||
request_nmethod_marking();
|
||||
_zombified_count++;
|
||||
SWEEP(nm);
|
||||
} else {
|
||||
@ -459,159 +475,57 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
|
||||
}
|
||||
} else if (nm->is_unloaded()) {
|
||||
// Unloaded code, just make it a zombie
|
||||
if (PrintMethodFlushing && Verbose)
|
||||
if (PrintMethodFlushing && Verbose) {
|
||||
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
|
||||
|
||||
}
|
||||
if (nm->is_osr_method()) {
|
||||
SWEEP(nm);
|
||||
// No inline caches will ever point to osr methods, so we can just remove it
|
||||
freed_memory = nm->total_size();
|
||||
release_nmethod(nm);
|
||||
_flushed_count++;
|
||||
} else {
|
||||
nm->make_zombie();
|
||||
_resweep = true;
|
||||
request_nmethod_marking();
|
||||
_zombified_count++;
|
||||
SWEEP(nm);
|
||||
}
|
||||
} else {
|
||||
assert(nm->is_alive(), "should be alive");
|
||||
|
||||
if (UseCodeCacheFlushing) {
|
||||
if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
|
||||
(_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
|
||||
// This method has not been called since the forced cleanup happened
|
||||
nm->make_not_entrant();
|
||||
if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
|
||||
// Do not make native methods and OSR-methods not-entrant
|
||||
nm->dec_hotness_counter();
|
||||
// Get the initial value of the hotness counter. This value depends on the
|
||||
// ReservedCodeCacheSize
|
||||
int reset_val = hotness_counter_reset_val();
|
||||
int time_since_reset = reset_val - nm->hotness_counter();
|
||||
double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
|
||||
// The less free space in the code cache we have - the bigger reverse_free_ratio() is.
|
||||
// I.e., 'threshold' increases with lower available space in the code cache and a higher
|
||||
// NmethodSweepActivity. If the current hotness counter - which decreases from its initial
|
||||
// value until it is reset by stack walking - is smaller than the computed threshold, the
|
||||
// corresponding nmethod is considered for removal.
|
||||
if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
|
||||
// A method is marked as not-entrant if the method is
|
||||
// 1) 'old enough': nm->hotness_counter() < threshold
|
||||
// 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
|
||||
// The second condition is necessary if we are dealing with very small code cache
|
||||
// sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
|
||||
// The second condition ensures that methods are not immediately made not-entrant
|
||||
// after compilation.
|
||||
nm->make_not_entrant();
|
||||
request_nmethod_marking();
|
||||
}
|
||||
}
|
||||
}
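  // Editor's note: a worked example of the threshold computation above, using assumed
  // numbers. Suppose hotness_counter_reset_val() returned 512 (a 256M code cache) and
  // the cache is nearly full, so CodeCache::reverse_free_ratio() is large, say 60.
  // With the default NmethodSweepActivity of 10:
  //   threshold = -512 + (60 * 10) = 88
  // An nmethod whose counter has decayed from 512 to below 88 without being reset by a
  // stack walk, and that has been live long enough that time_since_reset > 10, is made
  // not-entrant here and a new marking phase is requested.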
|
||||
|
||||
// Clean-up all inline caches that points to zombie/non-reentrant methods
|
||||
// Clean-up all inline caches that point to zombie/non-reentrant methods
|
||||
MutexLocker cl(CompiledIC_lock);
|
||||
nm->cleanup_inline_caches();
|
||||
SWEEP(nm);
|
||||
}
|
||||
return freed_memory;
|
||||
}
|
||||
|
||||
// Code cache unloading: when compilers notice the code cache is getting full,
|
||||
// they will call a vm op that comes here. This code attempts to speculatively
|
||||
// unload the oldest half of the nmethods (based on the compile job id) by
|
||||
// saving the old code in a list in the CodeCache. Then
|
||||
// execution resumes. If a method so marked is not called by the second sweeper
|
||||
// stack traversal after the current one, the nmethod will be marked non-entrant and
|
||||
// got rid of by normal sweeping. If the method is called, the Method*'s
|
||||
// _code field is restored and the Method*/nmethod
|
||||
// go back to their normal state.
|
||||
void NMethodSweeper::handle_full_code_cache(bool is_full) {
|
||||
|
||||
if (is_full) {
|
||||
// Since code cache is full, immediately stop new compiles
|
||||
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
|
||||
log_sweep("disable_compiler");
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure only one thread can flush
|
||||
// The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
|
||||
// no need to check the timeout here.
|
||||
jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
VM_HandleFullCodeCache op(is_full);
|
||||
VMThread::execute(&op);
|
||||
|
||||
// resweep again as soon as possible
|
||||
_resweep = true;
|
||||
}
|
||||
|
||||
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
|
||||
// If there was a race in detecting full code cache, only run
|
||||
// one vm op for it or keep the compiler shut off
|
||||
|
||||
jlong disconnect_start_counter = os::elapsed_counter();
|
||||
|
||||
// Traverse the code cache trying to dump the oldest nmethods
|
||||
int curr_max_comp_id = CompileBroker::get_compilation_id();
|
||||
int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
|
||||
|
||||
log_sweep("start_cleaning");
|
||||
|
||||
nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
|
||||
jint disconnected = 0;
|
||||
jint made_not_entrant = 0;
|
||||
jint nmethod_count = 0;
|
||||
|
||||
while ((nm != NULL)){
|
||||
int curr_comp_id = nm->compile_id();
|
||||
|
||||
// OSR methods cannot be flushed like this. Also, don't flush native methods
|
||||
// since they are part of the JDK in most cases
|
||||
if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
|
||||
|
||||
// only count methods that can be speculatively disconnected
|
||||
nmethod_count++;
|
||||
|
||||
if (nm->is_in_use() && (curr_comp_id < flush_target)) {
|
||||
if ((nm->method()->code() == nm)) {
|
||||
// This method has not been previously considered for
|
||||
// unloading or it was restored already
|
||||
CodeCache::speculatively_disconnect(nm);
|
||||
disconnected++;
|
||||
} else if (nm->is_speculatively_disconnected()) {
|
||||
// This method was previously considered for preemptive unloading and was not called since then
|
||||
CompilationPolicy::policy()->delay_compilation(nm->method());
|
||||
nm->make_not_entrant();
|
||||
made_not_entrant++;
|
||||
}
|
||||
|
||||
if (curr_comp_id > _highest_marked) {
|
||||
_highest_marked = curr_comp_id;
|
||||
}
|
||||
}
|
||||
}
|
||||
nm = CodeCache::alive_nmethod(CodeCache::next(nm));
|
||||
}
|
||||
|
||||
// remember how many compile_ids weren't seen last flush.
|
||||
_dead_compile_ids = curr_max_comp_id - nmethod_count;
|
||||
|
||||
log_sweep("stop_cleaning",
|
||||
"disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
|
||||
disconnected, made_not_entrant);
|
||||
|
||||
// Shut off compiler. Sweeper will start over with a new stack scan and
|
||||
// traversal cycle and turn it back on if it clears enough space.
|
||||
if (is_full) {
|
||||
_last_full_flush_time = os::javaTimeMillis();
|
||||
}
|
||||
|
||||
jlong disconnect_end_counter = os::elapsed_counter();
|
||||
jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
|
||||
_total_disconnect_time += disconnect_time;
|
||||
_peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
|
||||
|
||||
EventCleanCodeCache event(UNTIMED);
|
||||
if (event.should_commit()) {
|
||||
event.set_starttime(disconnect_start_counter);
|
||||
event.set_endtime(disconnect_end_counter);
|
||||
event.set_disconnectedCount(disconnected);
|
||||
event.set_madeNonEntrantCount(made_not_entrant);
|
||||
event.commit();
|
||||
}
|
||||
_number_of_flushes++;
|
||||
|
||||
// After two more traversals the sweeper will get rid of unrestored nmethods
|
||||
_last_flush_traversal_id = _traversals;
|
||||
_resweep = true;
|
||||
#ifdef ASSERT
|
||||
|
||||
if(PrintMethodFlushing && Verbose) {
|
||||
tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// Print out some state information about the current sweep and the
|
||||
// state of the code cache if it's requested.
|
||||
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
|
||||
|
@ -27,8 +27,30 @@
|
||||
|
||||
// An NmethodSweeper is an incremental cleaner for:
|
||||
// - cleanup inline caches
|
||||
// - reclamation of unreferenced zombie nmethods
|
||||
//
|
||||
// - reclamation of nmethods
|
||||
// Removing nmethods from the code cache includes two operations
|
||||
// 1) mark active nmethods
|
||||
// Is done in 'mark_active_nmethods()'. This function is called at a
|
||||
// safepoint and marks all nmethods that are active on a thread's stack.
|
||||
// 2) sweep nmethods
|
||||
// Is done in sweep_code_cache(). This function is the only place in the
|
||||
// sweeper where memory is reclaimed. Note that sweep_code_cache() is not
|
||||
// called at a safepoint. However, sweep_code_cache() stops executing if
|
||||
// another thread requests a safepoint. Consequently, 'mark_active_nmethods()'
|
||||
// and sweep_code_cache() cannot execute at the same time.
|
||||
// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
|
||||
// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
|
||||
// invalidation, and (iv) being replaced by a different method version (tiered
|
||||
// compilation). Not-entrant nmethods cannot be called by Java threads, but they
|
||||
// can still be active on the stack. To ensure that active nmethods are not reclaimed,
|
||||
// we have to wait until the next marking phase has completed. If a not-entrant
|
||||
// nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
|
||||
// remove the nmethod, all inline caches (IC) that point to the nmethod must be
|
||||
// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
|
||||
// state change happens during separate sweeps. It may take at least 3 sweeps before an
|
||||
// nmethod's space is freed. Sweeping is currently done by compiler threads between
|
||||
// compilations or at least every 5 seconds (NmethodSweepCheckInterval) when the code cache
|
||||
// is full.
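// Editor's note: the life cycle described above, summarized as a sketch (not part of
// this changeset). The last three transitions each happen in a separate sweep, which is
// why freeing an nmethod's space takes at least three sweeps:
//
//   alive --(sweeper / deopt / dependency invalidation / tiered replacement)--> not-entrant
//   not-entrant --(no activations found by the next marking phase)--> zombie
//   zombie --(first sweep that visits it)--> marked for reclamation
//   marked for reclamation --(next sweep, no inline caches can point to it)--> flushed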
|
||||
|
||||
class NMethodSweeper : public AllStatic {
|
||||
static long _traversals; // Stack scan count, also sweep ID.
|
||||
@ -41,46 +63,38 @@ class NMethodSweeper : public AllStatic {
|
||||
static volatile int _invocations; // No. of invocations left until we are completed with this pass
|
||||
static volatile int _sweep_started; // Flag to control conc sweeper
|
||||
|
||||
//The following are reset in scan_stacks and synchronized by the safepoint
|
||||
static bool _resweep; // Indicates that a change has happened and we want another sweep,
|
||||
// always checked and reset at a safepoint so memory will be in sync.
|
||||
static int _locked_seen; // Number of locked nmethods encountered during the scan
|
||||
// The following are reset in mark_active_nmethods and synchronized by the safepoint
|
||||
static bool _request_mark_phase; // Indicates that a change has happened and we need another mark phase,
|
||||
// always checked and reset at a safepoint so memory will be in sync.
|
||||
static int _locked_seen; // Number of locked nmethods encountered during the scan
|
||||
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
|
||||
static jint _flush_token; // token that guards method flushing, making sure it is executed only once.
|
||||
|
||||
// These are set during a flush, a VM-operation
|
||||
static long _last_flush_traversal_id; // trav number at last flush unloading
|
||||
static jlong _last_full_flush_time; // timestamp of last emergency unloading
|
||||
|
||||
// These are synchronized by the _sweep_started token
|
||||
static int _highest_marked; // highest compile id dumped at last emergency unloading
|
||||
static int _dead_compile_ids; // number of compile ids that where not in the cache last flush
|
||||
|
||||
// Stat counters
|
||||
static int _number_of_flushes; // Total of full traversals caused by full cache
|
||||
static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
|
||||
static jlong _total_time_sweeping; // Accumulated time sweeping
|
||||
static jlong _total_time_this_sweep; // Total time this sweep
|
||||
static jlong _peak_sweep_time; // Peak time for a full sweep
|
||||
static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction
|
||||
static jlong _total_disconnect_time; // Total time cleaning code mem
|
||||
static jlong _peak_disconnect_time; // Peak time cleaning code mem
|
||||
|
||||
static void process_nmethod(nmethod *nm);
|
||||
static int process_nmethod(nmethod *nm);
|
||||
static void release_nmethod(nmethod* nm);
|
||||
|
||||
static void log_sweep(const char* msg, const char* format = NULL, ...);
|
||||
static bool sweep_in_progress();
|
||||
static void sweep_code_cache();
|
||||
static void request_nmethod_marking() { _request_mark_phase = true; }
|
||||
static void reset_nmethod_marking() { _request_mark_phase = false; }
|
||||
static bool need_marking_phase() { return _request_mark_phase; }
|
||||
|
||||
static int _hotness_counter_reset_val;
|
||||
|
||||
public:
|
||||
static long traversal_count() { return _traversals; }
|
||||
static int number_of_flushes() { return _number_of_flushes; }
|
||||
static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
|
||||
static jlong total_time_sweeping() { return _total_time_sweeping; }
|
||||
static jlong peak_sweep_time() { return _peak_sweep_time; }
|
||||
static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
|
||||
static jlong total_disconnect_time() { return _total_disconnect_time; }
|
||||
static jlong peak_disconnect_time() { return _peak_disconnect_time; }
|
||||
static void log_sweep(const char* msg, const char* format = NULL, ...);
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
static bool is_sweeping(nmethod* which) { return _current == which; }
|
||||
@ -90,19 +104,18 @@ class NMethodSweeper : public AllStatic {
|
||||
static void report_events();
|
||||
#endif
|
||||
|
||||
static void scan_stacks(); // Invoked at the end of each safepoint
|
||||
static void sweep_code_cache(); // Concurrent part of sweep job
|
||||
static void possibly_sweep(); // Compiler threads call this to sweep
|
||||
static void mark_active_nmethods(); // Invoked at the end of each safepoint
|
||||
static void possibly_sweep(); // Compiler threads call this to sweep
|
||||
|
||||
static void notify(nmethod* nm) {
|
||||
static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
|
||||
static int hotness_counter_reset_val();
|
||||
|
||||
static void notify() {
|
||||
// Request a new sweep of the code cache from the beginning. No
|
||||
// need to synchronize the setting of this flag since it only
|
||||
// changes to false at safepoint so we can never overwrite it with false.
|
||||
_resweep = true;
|
||||
request_nmethod_marking();
|
||||
}
|
||||
|
||||
static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
|
||||
static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP
|
||||
|
@ -176,6 +176,7 @@
|
||||
#include "opto/loopnode.hpp"
|
||||
#include "opto/machnode.hpp"
|
||||
#include "opto/matcher.hpp"
|
||||
#include "opto/mathexactnode.hpp"
|
||||
#include "opto/mulnode.hpp"
|
||||
#include "opto/phaseX.hpp"
|
||||
#include "opto/parse.hpp"
|
||||
@ -841,7 +842,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
|
||||
nonstatic_field(nmethod, _osr_link, nmethod*) \
|
||||
nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
nonstatic_field(nmethod, _state, unsigned char) \
nonstatic_field(nmethod, _state, volatile unsigned char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _deoptimize_mh_offset, int) \
@ -1185,11 +1186,10 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* -XX flags */ \
/*********************/ \
\
nonstatic_field(Flag, type, const char*) \
nonstatic_field(Flag, name, const char*) \
unchecked_nonstatic_field(Flag, addr, sizeof(void*)) /* NOTE: no type */ \
nonstatic_field(Flag, kind, const char*) \
nonstatic_field(Flag, origin, FlagValueOrigin) \
nonstatic_field(Flag, _type, const char*) \
nonstatic_field(Flag, _name, const char*) \
unchecked_nonstatic_field(Flag, _addr, sizeof(void*)) /* NOTE: no type */ \
nonstatic_field(Flag, _flags, Flag::Flags) \
static_field(Flag, flags, Flag*) \
static_field(Flag, numFlags, size_t) \
\
@ -1360,6 +1360,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_integer_type(long) \
declare_integer_type(char) \
declare_unsigned_integer_type(unsigned char) \
declare_unsigned_integer_type(volatile unsigned char) \
declare_unsigned_integer_type(u_char) \
declare_unsigned_integer_type(unsigned int) \
declare_unsigned_integer_type(uint) \
@ -1382,6 +1383,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_toplevel_type(char**) \
declare_toplevel_type(u_char*) \
declare_toplevel_type(unsigned char*) \
declare_toplevel_type(volatile unsigned char*) \
\
/*******************************************************************/ \
/* Types which it will be handy to have available over in the SA */ \
@ -1928,6 +1930,9 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_c2_type(CmpF3Node, CmpFNode) \
declare_c2_type(CmpDNode, CmpNode) \
declare_c2_type(CmpD3Node, CmpDNode) \
declare_c2_type(MathExactNode, MultiNode) \
declare_c2_type(AddExactINode, MathExactNode) \
declare_c2_type(FlagsProjNode, ProjNode) \
declare_c2_type(BoolNode, Node) \
declare_c2_type(AbsNode, Node) \
declare_c2_type(AbsINode, AbsNode) \
@ -2074,7 +2079,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_integer_type(JavaThreadState) \
declare_integer_type(Location::Type) \
declare_integer_type(Location::Where) \
declare_integer_type(FlagValueOrigin) \
declare_integer_type(Flag::Flags) \
COMPILER2_PRESENT(declare_integer_type(OptoReg::Name)) \
\
declare_toplevel_type(CHeapObj<mtInternal>) \
@ -2082,7 +2087,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_type(Array<u1>, MetaspaceObj) \
declare_type(Array<u2>, MetaspaceObj) \
declare_type(Array<Klass*>, MetaspaceObj) \
declare_type(Array<Method*>, MetaspaceObj) \
declare_type(Array<Method*>, MetaspaceObj) \
\
declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
declare_toplevel_type(address) /* FIXME: should this be an integer type? */\

@ -173,10 +173,6 @@ void VM_UnlinkSymbols::doit() {
SymbolTable::unlink();
}

void VM_HandleFullCodeCache::doit() {
NMethodSweeper::speculative_disconnect_nmethods(_is_full);
}

void VM_Verify::doit() {
Universe::heap()->prepare_for_verify();
Universe::verify(_silent);

@ -51,7 +51,6 @@
template(DeoptimizeAll) \
template(ZombieAll) \
template(UnlinkSymbols) \
template(HandleFullCodeCache) \
template(Verify) \
template(PrintJNI) \
template(HeapDumper) \
@ -261,16 +260,6 @@ class VM_DeoptimizeFrame: public VM_Operation {
bool allow_nested_vm_operations() const { return true; }
};

class VM_HandleFullCodeCache: public VM_Operation {
private:
bool _is_full;
public:
VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; }
VMOp_Type type() const { return VMOp_HandleFullCodeCache; }
void doit();
bool allow_nested_vm_operations() const { return true; }
};

#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
private:

@ -245,7 +245,7 @@ static jint set_bool_flag(const char* name, AttachOperation* op, outputStream* o
}
value = (tmp != 0);
}
bool res = CommandLineFlags::boolAtPut((char*)name, &value, ATTACH_ON_DEMAND);
bool res = CommandLineFlags::boolAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
if (! res) {
out->print_cr("setting flag %s failed", name);
}
@ -263,7 +263,7 @@ static jint set_intx_flag(const char* name, AttachOperation* op, outputStream* o
return JNI_ERR;
}
}
bool res = CommandLineFlags::intxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
bool res = CommandLineFlags::intxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
if (! res) {
out->print_cr("setting flag %s failed", name);
}
@ -282,7 +282,7 @@ static jint set_uintx_flag(const char* name, AttachOperation* op, outputStream*
return JNI_ERR;
}
}
bool res = CommandLineFlags::uintxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
bool res = CommandLineFlags::uintxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
if (! res) {
out->print_cr("setting flag %s failed", name);
}
@ -301,7 +301,7 @@ static jint set_uint64_t_flag(const char* name, AttachOperation* op, outputStrea
return JNI_ERR;
}
}
bool res = CommandLineFlags::uint64_tAtPut((char*)name, &value, ATTACH_ON_DEMAND);
bool res = CommandLineFlags::uint64_tAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
if (! res) {
out->print_cr("setting flag %s failed", name);
}
@ -316,7 +316,7 @@ static jint set_ccstr_flag(const char* name, AttachOperation* op, outputStream*
out->print_cr("flag value must be a string");
return JNI_ERR;
}
bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, ATTACH_ON_DEMAND);
bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
if (res) {
FREE_C_HEAP_ARRAY(char, value, mtInternal);
} else {

@ -202,7 +202,7 @@ bool ClassLoadingService::set_verbose(bool verbose) {
MutexLocker m(Management_lock);

// verbose will be set to the previous value
bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassLoading", &verbose, MANAGEMENT);
bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassLoading", &verbose, Flag::MANAGEMENT);
assert(succeed, "Setting TraceClassLoading flag fails");
reset_trace_class_unloading();

@ -213,7 +213,7 @@ bool ClassLoadingService::set_verbose(bool verbose) {
void ClassLoadingService::reset_trace_class_unloading() {
assert(Management_lock->owned_by_self(), "Must own the Management_lock");
bool value = MemoryService::get_verbose() || ClassLoadingService::get_verbose();
bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassUnloading", &value, MANAGEMENT);
bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassUnloading", &value, Flag::MANAGEMENT);
assert(succeed, "Setting TraceClassUnLoading flag fails");
}

@ -51,7 +51,7 @@ class VM_DeoptimizeTheWorld : public VM_Operation {

static void set_bool_flag(const char* flag, bool value) {
CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value,
ATTACH_ON_DEMAND);
Flag::ATTACH_ON_DEMAND);
}

// Enable only the "fine grained" flags. Do *not* touch

@ -1643,9 +1643,13 @@ JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
int num_entries = 0;
for (int i = 0; i < nFlags; i++) {
Flag* flag = &Flag::flags[i];
// Exclude notproduct and develop flags in product builds.
if (flag->is_constant_in_binary()) {
continue;
}
// Exclude the locked (experimental, diagnostic) flags
if (flag->is_unlocked() || flag->is_unlocker()) {
Handle s = java_lang_String::create_from_str(flag->name, CHECK_0);
Handle s = java_lang_String::create_from_str(flag->_name, CHECK_0);
flags_ah->obj_at_put(num_entries, s());
num_entries++;
}
@ -1669,7 +1673,7 @@ JVM_END
bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
Handle flag_name;
if (name() == NULL) {
flag_name = java_lang_String::create_from_str(flag->name, CHECK_false);
flag_name = java_lang_String::create_from_str(flag->_name, CHECK_false);
} else {
flag_name = name;
}
@ -1698,23 +1702,23 @@ bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag,

global->writeable = flag->is_writeable();
global->external = flag->is_external();
switch (flag->origin) {
case DEFAULT:
switch (flag->get_origin()) {
case Flag::DEFAULT:
global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
break;
case COMMAND_LINE:
case Flag::COMMAND_LINE:
global->origin = JMM_VMGLOBAL_ORIGIN_COMMAND_LINE;
break;
case ENVIRON_VAR:
case Flag::ENVIRON_VAR:
global->origin = JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR;
break;
case CONFIG_FILE:
case Flag::CONFIG_FILE:
global->origin = JMM_VMGLOBAL_ORIGIN_CONFIG_FILE;
break;
case MANAGEMENT:
case Flag::MANAGEMENT:
global->origin = JMM_VMGLOBAL_ORIGIN_MANAGEMENT;
break;
case ERGONOMIC:
case Flag::ERGONOMIC:
global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
break;
default:
@ -1781,6 +1785,10 @@ JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env,
int num_entries = 0;
for (int i = 0; i < nFlags && num_entries < count; i++) {
Flag* flag = &Flag::flags[i];
// Exclude notproduct and develop flags in product builds.
if (flag->is_constant_in_binary()) {
continue;
}
// Exclude the locked (diagnostic, experimental) flags
if ((flag->is_unlocked() || flag->is_unlocker()) &&
add_global_entry(env, null_h, &globals[num_entries], flag, THREAD)) {
@ -1813,23 +1821,23 @@ JVM_ENTRY(void, jmm_SetVMGlobal(JNIEnv *env, jstring flag_name, jvalue new_value
bool succeed;
if (flag->is_bool()) {
bool bvalue = (new_value.z == JNI_TRUE ? true : false);
succeed = CommandLineFlags::boolAtPut(name, &bvalue, MANAGEMENT);
succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT);
} else if (flag->is_intx()) {
intx ivalue = (intx)new_value.j;
succeed = CommandLineFlags::intxAtPut(name, &ivalue, MANAGEMENT);
succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT);
} else if (flag->is_uintx()) {
uintx uvalue = (uintx)new_value.j;
succeed = CommandLineFlags::uintxAtPut(name, &uvalue, MANAGEMENT);
succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT);
} else if (flag->is_uint64_t()) {
uint64_t uvalue = (uint64_t)new_value.j;
succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, MANAGEMENT);
succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, Flag::MANAGEMENT);
} else if (flag->is_ccstr()) {
oop str = JNIHandles::resolve_external_guard(new_value.l);
if (str == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
ccstr svalue = java_lang_String::as_utf8_string(str);
succeed = CommandLineFlags::ccstrAtPut(name, &svalue, MANAGEMENT);
succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT);
}
assert(succeed, "Setting flag should succeed");
JVM_END

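For context, the origin constants renamed here (Flag::DEFAULT, Flag::MANAGEMENT, and so on) are the values reported back to Java callers as VMOption origins through the platform management interface. A minimal sketch of that user-visible side, assuming a HotSpot JDK in which PrintGCDetails is a manageable flag (the class name and flag choice are illustrative only):

import java.lang.management.ManagementFactory;

import com.sun.management.HotSpotDiagnosticMXBean;
import com.sun.management.VMOption;

public class FlagOriginProbe {
    public static void main(String[] args) throws Exception {
        HotSpotDiagnosticMXBean hs =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);

        // Reading a flag; its origin starts out as DEFAULT unless it was set elsewhere
        // (command line, environment, config file, ergonomics, ...).
        VMOption before = hs.getVMOption("PrintGCDetails");
        System.out.println(before.getName() + " = " + before.getValue()
            + " (origin " + before.getOrigin() + ")");

        // Setting a manageable flag through the MXBean; afterwards the origin
        // reads back as MANAGEMENT.
        hs.setVMOption("PrintGCDetails", "true");
        System.out.println("after set: origin "
            + hs.getVMOption("PrintGCDetails").getOrigin());
    }
}
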
@ -515,7 +515,7 @@ void MemoryService::oops_do(OopClosure* f) {
bool MemoryService::set_verbose(bool verbose) {
MutexLocker m(Management_lock);
// verbose will be set to the previous value
bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, MANAGEMENT);
bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, Flag::MANAGEMENT);
assert(succeed, "Setting PrintGC flag fails");
ClassLoadingService::reset_trace_class_unloading();

@ -618,4 +618,3 @@ TraceMemoryManagerStats::~TraceMemoryManagerStats() {
MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime,
_recordGCEndTime, _countCollection, _cause);
}

@ -313,13 +313,6 @@ Declares a structure type that can be used in other events.
<value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
</event>

<event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
description="Clean code cache from oldest methods"
has_thread="true" is_requestable="false" is_constant="false">
<value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
<value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
</event>

<!-- Code cache events -->

<event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"

hotspot/test/compiler/intrinsics/mathexact/CondTest.java (new file, 59 lines)
@ -0,0 +1,59 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8024924
* @summary Test non constant addExact
* @compile CondTest.java Verify.java
* @run main CondTest
*
*/

import java.lang.ArithmeticException;

public class CondTest {
public static int result = 0;

public static void main(String[] args) {
for (int i = 0; i < 50000; ++i) {
runTest();
}
}

public static void runTest() {
int i = 7;
while (java.lang.Math.addExact(i, result) < 89361) {
if ((java.lang.Math.addExact(i, i) & 1) == 1) {
i += 3;
} else if ((i & 5) == 4) {
i += 7;
} else if ((i & 0xf) == 6) {
i += 2;
} else {
i += 1;
}
result += 2;
}
}
}

hotspot/test/compiler/intrinsics/mathexact/ConstantTest.java (new file, 47 lines)
@ -0,0 +1,47 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8024924
* @summary Test constant addExact
* @compile ConstantTest.java Verify.java
* @run main ConstantTest
*
*/

import java.lang.ArithmeticException;

public class ConstantTest {
public static void main(String[] args) {
for (int i = 0; i < 50000; ++i) {
Verify.verify(5, 7);
Verify.verify(Integer.MAX_VALUE, 1);
Verify.verify(Integer.MIN_VALUE, -1);
Verify.verify(Integer.MAX_VALUE, -1);
Verify.verify(Integer.MIN_VALUE, 1);
Verify.verify(Integer.MAX_VALUE / 2, Integer.MAX_VALUE / 2);
Verify.verify(Integer.MAX_VALUE / 2, (Integer.MAX_VALUE / 2) + 3);
}
}
}

hotspot/test/compiler/intrinsics/mathexact/LoadTest.java (new file, 55 lines)
@ -0,0 +1,55 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8024924
* @summary Test non constant addExact
* @compile LoadTest.java Verify.java
* @run main LoadTest
*
*/

import java.lang.ArithmeticException;

public class LoadTest {
public static java.util.Random rnd = new java.util.Random();
public static int[] values = new int[256];

public static void main(String[] args) {
for (int i = 0; i < values.length; ++i) {
values[i] = rnd.nextInt();
}

for (int i = 0; i < 50000; ++i) {
Verify.verify(values[i & 255], values[i & 255] - i);
Verify.verify(values[i & 255] + i, values[i & 255] - i);
Verify.verify(values[i & 255], values[i & 255]);
if ((i & 1) == 1 && i > 5) {
Verify.verify(values[i & 255] + i, values[i & 255] - i);
} else {
Verify.verify(values[i & 255] - i, values[i & 255] + i);
}
}
}
}

@ -0,0 +1,48 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8024924
* @summary Test non constant addExact
* @compile LoopDependentTest.java Verify.java
* @run main LoopDependentTest
*
*/

import java.lang.ArithmeticException;

public class LoopDependentTest {
public static java.util.Random rnd = new java.util.Random();

public static void main(String[] args) {
int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt();
for (int i = 0; i < 50000; ++i) {
Verify.verify(rnd1 + i, rnd2 + i);
Verify.verify(rnd1 + i, rnd2 + (i & 0xff));
Verify.verify(rnd1 - i, rnd2 - (i & 0xff));
Verify.verify(rnd1 + i + 1, rnd2 + i + 2);
Verify.verify(rnd1 + i * 2, rnd2 + i);
}
}
}

@ -0,0 +1,48 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8024924
* @summary Test non constant addExact
* @compile NonConstantTest.java Verify.java
* @run main NonConstantTest
*
*/

import java.lang.ArithmeticException;

public class NonConstantTest {
public static java.util.Random rnd = new java.util.Random();

public static void main(String[] args) {
for (int i = 0; i < 50000; ++i) {
int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt();
Verify.verify(rnd1, rnd2);
Verify.verify(rnd1, rnd2 + 1);
Verify.verify(rnd1 + 1, rnd2);
Verify.verify(rnd1 - 1, rnd2);
Verify.verify(rnd1, rnd2 - 1);
}
}
}

hotspot/test/compiler/intrinsics/mathexact/Verify.java (new file, 68 lines)
@ -0,0 +1,68 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

public class Verify {
public static String throwWord(boolean threw) {
return (threw ? "threw" : "didn't throw");
}

public static void verify(int a, int b) {
boolean exception1 = false, exception2 = false;
int result1 = 0, result2 = 0;
try {
result1 = testIntrinsic(a, b);
} catch (ArithmeticException e) {
exception1 = true;
}
try {
result2 = testNonIntrinsic(a, b);
} catch (ArithmeticException e) {
exception2 = true;
}

if (exception1 != exception2) {
throw new RuntimeException("Intrinsic version " + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b);
}
if (result1 != result2) {
throw new RuntimeException("Intrinsic version returned: " + a + " while NonIntrinsic version returned: " + b);
}
}

public static int testIntrinsic(int a, int b) {
return java.lang.Math.addExact(a, b);
}

public static int testNonIntrinsic(int a, int b) {
return safeAddExact(a, b);
}

// Copied java.lang.Math.addExact to avoid intrinsification
public static int safeAddExact(int x, int y) {
int r = x + y;
// HD 2-12 Overflow iff both arguments have the opposite sign of the result
if (((x ^ r) & (y ^ r)) < 0) {
throw new ArithmeticException("integer overflow");
}
return r;
}
}

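The safeAddExact copy above relies on the Hacker's Delight 2-12 identity quoted in its comment: a signed addition overflows exactly when both operands have a sign opposite to the sign of the wrapped result, i.e. ((x ^ r) & (y ^ r)) < 0. A small stand-alone illustration of that identity (this demo class is an editorial sketch, not part of the change):

public class OverflowCheckDemo {
    // Same test as safeAddExact, returned as a boolean instead of thrown.
    static boolean overflows(int x, int y) {
        int r = x + y;                     // wraps silently on overflow
        return ((x ^ r) & (y ^ r)) < 0;    // both operands differ in sign from the result
    }

    public static void main(String[] args) {
        System.out.println(overflows(Integer.MAX_VALUE, 1));   // true, wraps to Integer.MIN_VALUE
        System.out.println(overflows(Integer.MIN_VALUE, -1));  // true, wraps to Integer.MAX_VALUE
        System.out.println(overflows(Integer.MAX_VALUE, -1));  // false
        System.out.println(overflows(5, 7));                   // false
    }
}
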
@ -0,0 +1,44 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

/**
* A minimal classloader for loading bytecodes that could not result from
* properly compiled Java.
*
* @author dr2chase
*/
public class ByteClassLoader extends ClassLoader {
/**
* (pre)load class name using classData for the definition.
*
* @param name
* @param classData
* @return
*/
public Class<?> loadBytes(String name, byte[] classData) {
Class<?> clazz = defineClass(name, classData, 0, classData.length);
resolveClass(clazz);
return clazz;
}
}

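ByteClassLoader.loadBytes hands raw class-file bytes straight to ClassLoader.defineClass and links the result, which is what lets the jsr292 test run bytecode that javac would never emit. A hedged usage sketch, reusing the bytes of an already compiled class so the example stays self-contained (the demo and payload class names are placeholders, not part of the change):

import java.io.ByteArrayOutputStream;
import java.io.InputStream;

public class ByteClassLoaderDemo {
    // Tiny payload whose compiled bytes are fed back through ByteClassLoader.
    public static class Payload {
        public String hello() { return "loaded by " + getClass().getClassLoader(); }
    }

    public static void main(String[] args) throws Exception {
        // Read the payload's .class bytes from the class path.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (InputStream in = ByteClassLoaderDemo.class
                .getResourceAsStream("ByteClassLoaderDemo$Payload.class")) {
            int b;
            while ((b = in.read()) != -1) {
                buf.write(b);
            }
        }

        // Define the same bytes again in a fresh loader and call into the copy.
        Class<?> copy = new ByteClassLoader()
            .loadBytes("ByteClassLoaderDemo$Payload", buf.toByteArray());
        Object o = copy.getDeclaredConstructor().newInstance();
        System.out.println(copy.getMethod("hello").invoke(o));
    }
}
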
hotspot/test/compiler/jsr292/methodHandleExceptions/C.java (new file, 33 lines)
@ -0,0 +1,33 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

/**
* Test class -- implements I, which provides default for m, but this class
* declares it abstract which (should) hide the interface default, and throw
* an abstract method error if it is called (calling it requires bytecode hacking
* or inconsistent compilation).
*/
public abstract class C implements I {
public abstract int m();
}

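The interface I that C implements is not among the files shown in this diff; the comment above only says that it supplies a default implementation of m(). Purely as an illustration of the shape being described (the body and return value below are guesses, not the actual test source):

public interface I {
    // A default body that C deliberately re-abstracts; invoking C.m() via
    // hand-crafted bytecode that ignores the abstract redeclaration is
    // expected to raise AbstractMethodError instead of running a default
    // like this one.
    default int m() {
        return 0;
    }
}
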
Some files were not shown because too many files have changed in this diff.