Alejandro Murillo 2013-01-25 02:36:28 -08:00
commit d7fb5addc5
151 changed files with 2843 additions and 1570 deletions

View File

@ -308,4 +308,5 @@ e94068d4ff52849c8aa0786a53a59b63d1312a39 jdk8-b70
d5cb5830f570d1304ea4b196dde672a291b55f29 jdk8-b72
1e129851479e4f5df439109fca2c7be1f1613522 hs25-b15
11619f33cd683c2f1d6ef72f1c6ff3dacf5a9f1c jdk8-b73
70c89bd6b895a10d25ca70e08093c09ff2005fda hs25-b16
1a3e54283c54aaa8b3437813e8507fbdc966e5b6 jdk8-b74

View File

@ -52,6 +52,9 @@ public class InstanceKlass extends Klass {
private static int LOW_OFFSET;
private static int HIGH_OFFSET;
private static int FIELD_SLOTS;
private static short FIELDINFO_TAG_SIZE;
private static short FIELDINFO_TAG_MASK;
private static short FIELDINFO_TAG_OFFSET;
// ClassState constants
private static int CLASS_STATE_ALLOCATED;
@ -96,9 +99,13 @@ public class InstanceKlass extends Klass {
NAME_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::name_index_offset").intValue();
SIGNATURE_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::signature_index_offset").intValue();
INITVAL_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::initval_index_offset").intValue();
LOW_OFFSET = db.lookupIntConstant("FieldInfo::low_offset").intValue();
HIGH_OFFSET = db.lookupIntConstant("FieldInfo::high_offset").intValue();
LOW_OFFSET = db.lookupIntConstant("FieldInfo::low_packed_offset").intValue();
HIGH_OFFSET = db.lookupIntConstant("FieldInfo::high_packed_offset").intValue();
FIELD_SLOTS = db.lookupIntConstant("FieldInfo::field_slots").intValue();
FIELDINFO_TAG_SIZE = db.lookupIntConstant("FIELDINFO_TAG_SIZE").shortValue();
FIELDINFO_TAG_MASK = db.lookupIntConstant("FIELDINFO_TAG_MASK").shortValue();
FIELDINFO_TAG_OFFSET = db.lookupIntConstant("FIELDINFO_TAG_OFFSET").shortValue();
// read ClassState constants
CLASS_STATE_ALLOCATED = db.lookupIntConstant("InstanceKlass::allocated").intValue();
CLASS_STATE_LOADED = db.lookupIntConstant("InstanceKlass::loaded").intValue();
@ -314,8 +321,12 @@ public class InstanceKlass extends Klass {
public int getFieldOffset(int index) {
U2Array fields = getFields();
return VM.getVM().buildIntFromShorts(fields.at(index * FIELD_SLOTS + LOW_OFFSET),
fields.at(index * FIELD_SLOTS + HIGH_OFFSET));
short lo = fields.at(index * FIELD_SLOTS + LOW_OFFSET);
short hi = fields.at(index * FIELD_SLOTS + HIGH_OFFSET);
if ((lo & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET) {
return VM.getVM().buildIntFromShorts(lo, hi) >> FIELDINFO_TAG_SIZE;
}
throw new RuntimeException("should not reach here");
}
// Accessors for declared fields
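The SA change above tracks the new packed FieldInfo layout: a field's byte offset now shares its short pair with a low-order tag, so getFieldOffset() must check the tag and shift it away. A small stand-alone C++ sketch of that encoding (the tag constants below are assumed to be a 2-bit tag in the low bits; the real values come from the VM via the lookups added above):

#include <cstdint>
#include <cstdio>

// Assumed tag layout: the low FIELDINFO_TAG_SIZE bits of the packed word carry
// a tag, the remaining bits carry the byte offset. The real values are read
// from the VM (fieldInfo.hpp) by the lookups added above.
const int      FIELDINFO_TAG_SIZE   = 2;
const uint32_t FIELDINFO_TAG_MASK   = (1u << FIELDINFO_TAG_SIZE) - 1;
const uint32_t FIELDINFO_TAG_OFFSET = 1;   // "this word holds a real offset"

// Pack a byte offset plus tag into one 32-bit word, split into the two shorts
// the fields array stores (low short first, high short second).
static void pack_offset(uint32_t offset, uint16_t& lo, uint16_t& hi) {
  uint32_t packed = (offset << FIELDINFO_TAG_SIZE) | FIELDINFO_TAG_OFFSET;
  lo = (uint16_t)(packed & 0xFFFF);
  hi = (uint16_t)(packed >> 16);
}

// Recover the offset the way getFieldOffset() now does: rebuild the int from
// the two shorts, check the tag, then shift the tag bits away.
static uint32_t unpack_offset(uint16_t lo, uint16_t hi) {
  if ((lo & FIELDINFO_TAG_MASK) != FIELDINFO_TAG_OFFSET) {
    std::fprintf(stderr, "not an offset-tagged word\n");
    return 0;
  }
  uint32_t packed = ((uint32_t)hi << 16) | lo;
  return packed >> FIELDINFO_TAG_SIZE;
}

int main() {
  uint16_t lo, hi;
  pack_offset(128, lo, hi);
  std::printf("decoded offset = %u\n", unpack_offset(lo, hi));   // prints 128
  return 0;
}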

View File

@ -3,7 +3,7 @@
#
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -205,7 +205,6 @@ SUNWprivate_1.1 {
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_PrintStackTrace;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;

View File

@ -3,7 +3,7 @@
#
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -205,7 +205,6 @@ SUNWprivate_1.1 {
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_PrintStackTrace;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
#
# Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2012
HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=15
HS_BUILD_NUMBER=17
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -201,7 +201,6 @@ SUNWprivate_1.1 {
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_PrintStackTrace;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -201,7 +201,6 @@ SUNWprivate_1.1 {
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_PrintStackTrace;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -201,7 +201,6 @@ SUNWprivate_1.1 {
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_PrintStackTrace;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;

View File

@ -29,7 +29,7 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* JavaCallWrapper */ \
@ -37,22 +37,12 @@
/******************************/ \
/* JavaFrameAnchor */ \
/******************************/ \
volatile_nonstatic_field(JavaFrameAnchor, _flags, int) \
\
volatile_nonstatic_field(JavaFrameAnchor, _flags, int)
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
/* be present there) */
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must */
/* be present there) */
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
/******************************/ \
/* Register numbers (C2 only) */ \
/******************************/ \
@ -90,15 +80,6 @@
declare_c2_constant(R_G6_num) \
declare_c2_constant(R_G7_num)
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_INT_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // CPU_SPARC_VM_VMSTRUCTS_SPARC_HPP
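The macro churn above (and in the other vmStructs_*.hpp files later in this commit) is mechanical: these are X-macro style lists whose trailing last_entry callback parameter is being dropped. A tiny C++ sketch of the pattern, with hypothetical names rather than the HotSpot macros, showing why a list macro needs no terminator once no last_entry() call is required:

#include <cstdio>
#include <cstddef>

struct Thread { int _id; void* _stack; };

// X-macro style list: every entry is handed to whatever callback the expander
// passes in, and the list simply ends -- no trailing last_entry() call needed.
#define MY_VM_STRUCTS(nonstatic_field)          \
  nonstatic_field(Thread, _id,    int)          \
  nonstatic_field(Thread, _stack, void*)

// One possible expansion: print the name, type and offset of every field.
#define PRINT_FIELD(klass, field, type) \
  std::printf(#klass "::" #field " (" #type ") at offset %zu\n", offsetof(klass, field));

int main() {
  MY_VM_STRUCTS(PRINT_FIELD)
  return 0;
}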

View File

@ -259,6 +259,10 @@ void VM_Version::initialize() {
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("Allocation");
@ -286,6 +290,9 @@ void VM_Version::initialize() {
if (PrefetchFieldsAhead > 0) {
tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
}
if (ContendedPaddingWidth > 0) {
tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth);
}
}
#endif // PRODUCT
}
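The two lines added above widen ContendedPaddingWidth to the detected cache line size when the flag was left at its default, so padded (contended) fields never share a cache line. A minimal C++ sketch of that decision; the IntFlag type is a hypothetical stand-in for HotSpot's flag machinery, and the 256-byte line size is only an example value:

#include <cstdio>

// Hypothetical stand-in for a VM flag that records whether the user set it.
struct IntFlag {
  int  value;
  bool is_default;
};

// Widen the padding to a full cache line unless the user asked for something
// larger themselves -- the same test VM_Version::initialize() now performs.
static void adjust_contended_padding(IntFlag& contended_padding_width,
                                     int cache_line_size) {
  if (contended_padding_width.is_default &&
      cache_line_size > contended_padding_width.value) {
    contended_padding_width.value = cache_line_size;
  }
}

int main() {
  IntFlag width = { 128, /*is_default=*/true };   // example default of 128 bytes
  adjust_contended_padding(width, 256);           // example 256-byte cache line
  std::printf("ContendedPaddingWidth = %d\n", width.value);   // prints 256
  return 0;
}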

View File

@ -2263,6 +2263,18 @@ void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
emit_int8(0x00);
emit_int8(0xC0 | encode);
emit_int8(imm8);
}
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_2(), "");
InstructionMark im(this);
@ -2475,7 +2487,7 @@ void Assembler::vptest(XMMRegister dst, Address src) {
assert(dst != xnoreg, "sanity");
int dst_enc = dst->encoding();
// swap src<->dst for encoding
vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
emit_int8(0x17);
emit_operand(dst, src);
}

View File

@ -1395,6 +1395,10 @@ private:
// Pack with unsigned saturation
void packuswb(XMMRegister dst, XMMRegister src);
void packuswb(XMMRegister dst, Address src);
void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
// Permutation of 64-bit words
void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
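The new vpermq declaration exists because the 256-bit form of vpackuswb packs each 128-bit lane independently, leaving the packed quadwords in the order a-low, b-low, a-high, b-high; a vpermq with imm8 0xD8 (quadword selector 0,2,1,3) restores linear order, which is how encode_iso_array later in this commit uses it. A scalar C++ simulation of that byte ordering (an illustration only, not an intrinsic test):

#include <cstdint>
#include <cstdio>

static uint8_t sat_u8(uint16_t w) { return w > 0xFF ? 0xFF : (uint8_t)w; }

// Scalar model of 256-bit VPACKUSWB: each 128-bit lane is packed on its own,
// so the result quadwords come out as [a_lo, b_lo, a_hi, b_hi].
static void packuswb256(const uint16_t a[16], const uint16_t b[16], uint8_t out[32]) {
  for (int i = 0; i < 8; i++) {
    out[i]      = sat_u8(a[i]);      // qword 0: low lane of a
    out[8 + i]  = sat_u8(b[i]);      // qword 1: low lane of b
    out[16 + i] = sat_u8(a[8 + i]);  // qword 2: high lane of a
    out[24 + i] = sat_u8(b[8 + i]);  // qword 3: high lane of b
  }
}

// Scalar model of VPERMQ: imm8 selects which source quadword lands in each
// destination quadword; 0xD8 = 11 01 10 00 selects 0, 2, 1, 3.
static void permq(const uint8_t in[32], uint8_t out[32], uint8_t imm8) {
  for (int q = 0; q < 4; q++) {
    int src = (imm8 >> (2 * q)) & 3;
    for (int i = 0; i < 8; i++) out[q * 8 + i] = in[src * 8 + i];
  }
}

int main() {
  uint16_t a[16], b[16];
  for (int i = 0; i < 16; i++) { a[i] = (uint16_t)i; b[i] = (uint16_t)(16 + i); }
  uint8_t packed[32], fixed[32];
  packuswb256(a, b, packed);
  permq(packed, fixed, 0xD8);          // restores 0..31 in linear order
  for (int i = 0; i < 32; i++) std::printf("%d ", fixed[i]);
  std::printf("\n");
  return 0;
}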

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,14 +38,9 @@
#define JNICALL
typedef int jint;
#if defined(_LP64) && !defined(__APPLE__)
#if defined(_LP64)
typedef long jlong;
#else
/*
* On _LP64 __APPLE__ "long" and "long long" are both 64 bits,
* but we use the "long long" typedef to avoid complaints from
* the __APPLE__ compiler about fprintf formats.
*/
typedef long long jlong;
#endif

View File

@ -6209,6 +6209,128 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
}
BIND(L_exit);
}
// encode char[] to byte[] in ISO_8859_1
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,
Register tmp5, Register result) {
// rsi: src
// rdi: dst
// rdx: len
// rcx: tmp5
// rax: result
ShortBranchVerifier sbv(this);
assert_different_registers(src, dst, len, tmp5, result);
Label L_done, L_copy_1_char, L_copy_1_char_exit;
// set result
xorl(result, result);
// check for zero length
testl(len, len);
jcc(Assembler::zero, L_done);
movl(result, len);
// Setup pointers
lea(src, Address(src, len, Address::times_2)); // char[]
lea(dst, Address(dst, len, Address::times_1)); // byte[]
negptr(len);
if (UseSSE42Intrinsics || UseAVX >= 2) {
Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
if (UseAVX >= 2) {
Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
movdl(tmp1Reg, tmp5);
vpbroadcastd(tmp1Reg, tmp1Reg);
jmpb(L_chars_32_check);
bind(L_copy_32_chars);
vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
vptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
jccb(Assembler::notZero, L_copy_32_chars_exit);
vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector256 */ true);
vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
bind(L_chars_32_check);
addptr(len, 32);
jccb(Assembler::lessEqual, L_copy_32_chars);
bind(L_copy_32_chars_exit);
subptr(len, 16);
jccb(Assembler::greater, L_copy_16_chars_exit);
} else if (UseSSE42Intrinsics) {
movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
movdl(tmp1Reg, tmp5);
pshufd(tmp1Reg, tmp1Reg, 0);
jmpb(L_chars_16_check);
}
bind(L_copy_16_chars);
if (UseAVX >= 2) {
vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
vptest(tmp2Reg, tmp1Reg);
jccb(Assembler::notZero, L_copy_16_chars_exit);
vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector256 */ true);
vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector256 */ true);
} else {
if (UseAVX > 0) {
movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ false);
} else {
movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
por(tmp2Reg, tmp3Reg);
movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
por(tmp2Reg, tmp4Reg);
}
ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
jccb(Assembler::notZero, L_copy_16_chars_exit);
packuswb(tmp3Reg, tmp4Reg);
}
movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
bind(L_chars_16_check);
addptr(len, 16);
jccb(Assembler::lessEqual, L_copy_16_chars);
bind(L_copy_16_chars_exit);
subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit);
bind(L_copy_8_chars);
movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
ptest(tmp3Reg, tmp1Reg);
jccb(Assembler::notZero, L_copy_8_chars_exit);
packuswb(tmp3Reg, tmp1Reg);
movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
addptr(len, 8);
jccb(Assembler::lessEqual, L_copy_8_chars);
bind(L_copy_8_chars_exit);
subptr(len, 8);
jccb(Assembler::zero, L_done);
}
bind(L_copy_1_char);
load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
testl(tmp5, 0xff00); // check if Unicode char
jccb(Assembler::notZero, L_copy_1_char_exit);
movb(Address(dst, len, Address::times_1, 0), tmp5);
addptr(len, 1);
jccb(Assembler::less, L_copy_1_char);
bind(L_copy_1_char_exit);
addptr(result, len); // len is negative count of not processed elements
bind(L_done);
}
#undef BIND
#undef BLOCK_COMMENT
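The routine above is the vectorized intrinsic for ISO-8859-1 encoding of a char[]; as its own comment notes, len counts up from a negative value, so adding it back into result at L_copy_1_char_exit yields the number of chars actually encoded. A plain scalar C++ reference with the same contract (a sketch, not HotSpot code) makes that easier to see:

#include <cstdio>
#include <cstdint>
#include <cstddef>

// Scalar reference for the contract of the vectorized routine above: encode
// UTF-16 code units to ISO-8859-1 bytes, stop at the first code unit that does
// not fit in one byte, and return how many chars were encoded.
static size_t encode_iso_array_ref(const uint16_t* src, uint8_t* dst, size_t len) {
  size_t i = 0;
  for (; i < len; i++) {
    uint16_t c = src[i];
    if (c & 0xFF00) break;       // not representable in ISO-8859-1
    dst[i] = (uint8_t)c;
  }
  return i;                      // chars actually encoded
}

int main() {
  const uint16_t chars[] = { 'H', 'i', 0x00E9, 0x0416, '!' };  // U+0416 needs more than one byte
  uint8_t bytes[5];
  size_t n = encode_iso_array_ref(chars, bytes, 5);
  std::printf("encoded %zu of 5 chars\n", n);   // 3: stops at U+0416
  return 0;
}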

View File

@ -1135,6 +1135,10 @@ public:
Register to, Register value, Register count,
Register rtmp, XMMRegister xtmp);
void encode_iso_array(Register src, Register dst, Register len,
XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
XMMRegister tmp4, Register tmp5, Register result);
#undef VIRTUAL
};

View File

@ -29,7 +29,7 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* JavaCallWrapper */ \
@ -37,31 +37,14 @@
/******************************/ \
/* JavaFrameAnchor */ \
/******************************/ \
volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*) \
\
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
/* be present there) */
volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must */
/* be present there) */
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_INT_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // CPU_X86_VM_VMSTRUCTS_X86_HPP

View File

@ -661,6 +661,14 @@ void VM_Version::get_processor_features() {
}
}
}
#if defined(COMPILER2) && defined(_ALLBSD_SOURCE)
if (MaxVectorSize > 16) {
// Limit vectors size to 16 bytes on BSD until it fixes
// restoring upper 128bit of YMM registers on return
// from signal handler.
FLAG_SET_DEFAULT(MaxVectorSize, 16);
}
#endif // COMPILER2
// Use population count instruction if available.
if (supports_popcnt()) {
@ -745,6 +753,10 @@ void VM_Version::get_processor_features() {
PrefetchFieldsAhead = prefetch_fields_ahead();
#endif
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
@ -791,6 +803,9 @@ void VM_Version::get_processor_features() {
if (PrefetchFieldsAhead > 0) {
tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
}
if (ContendedPaddingWidth > 0) {
tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth);
}
}
#endif // !PRODUCT
}

View File

@ -11687,6 +11687,23 @@ instruct array_equals(eDIRegP ary1, eSIRegP ary2, eAXRegI result,
ins_pipe( pipe_slow );
%}
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(eSIRegP src, eDIRegP dst, eDXRegI len,
regD tmp1, regD tmp2, regD tmp3, regD tmp4,
eCXRegI tmp5, eAXRegI result, eFlagsReg cr) %{
match(Set result (EncodeISOArray src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
format %{ "Encode array $src,$dst,$len -> $result // KILL ECX, EDX, $tmp1, $tmp2, $tmp3, $tmp4, ESI, EDI " %}
ins_encode %{
__ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
$tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
%}
ins_pipe( pipe_slow );
%}
//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions
instruct compI_eReg(eFlagsReg cr, rRegI op1, rRegI op2) %{

View File

@ -10495,6 +10495,23 @@ instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
ins_pipe( pipe_slow );
%}
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len,
regD tmp1, regD tmp2, regD tmp3, regD tmp4,
rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
match(Set result (EncodeISOArray src (Binary dst len)));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
format %{ "Encode array $src,$dst,$len -> $result // KILL RCX, RDX, $tmp1, $tmp2, $tmp3, $tmp4, RSI, RDI " %}
ins_encode %{
__ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
$tmp1$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister,
$tmp4$$XMMRegister, $tmp5$$Register, $result$$Register);
%}
ins_pipe( pipe_slow );
%}
//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions

View File

@ -98,10 +98,20 @@ BasicObjectLock* frame::interpreter_frame_monitor_end() const {
#endif // CC_INTERP
void frame::patch_pc(Thread* thread, address pc) {
// We borrow this call to set the thread pointer in the interpreter
// state; the hook to set up deoptimized frames isn't supplied it.
assert(pc == NULL, "should be");
get_interpreterState()->set_thread((JavaThread *) thread);
if (pc != NULL) {
_cb = CodeCache::find_blob(pc);
SharkFrame* sharkframe = zeroframe()->as_shark_frame();
sharkframe->set_pc(pc);
_pc = pc;
_deopt_state = is_deoptimized;
} else {
// We borrow this call to set the thread pointer in the interpreter
// state; the hook to set up deoptimized frames isn't supplied it.
assert(pc == NULL, "should be");
get_interpreterState()->set_thread((JavaThread *) thread);
}
}
bool frame::safe_for_sender(JavaThread *thread) {

View File

@ -45,27 +45,36 @@ inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
case ZeroFrame::ENTRY_FRAME:
_pc = StubRoutines::call_stub_return_pc();
_cb = NULL;
_deopt_state = not_deoptimized;
break;
case ZeroFrame::INTERPRETER_FRAME:
_pc = NULL;
_cb = NULL;
_deopt_state = not_deoptimized;
break;
case ZeroFrame::SHARK_FRAME:
case ZeroFrame::SHARK_FRAME: {
_pc = zero_sharkframe()->pc();
_cb = CodeCache::find_blob_unsafe(pc());
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
break;
}
case ZeroFrame::FAKE_STUB_FRAME:
_pc = NULL;
_cb = NULL;
_deopt_state = not_deoptimized;
break;
default:
ShouldNotReachHere();
}
_deopt_state = not_deoptimized;
}
// Accessors

View File

@ -68,6 +68,10 @@ class SharkFrame : public ZeroFrame {
return (address) value_of_word(pc_off);
}
void set_pc(address pc) const {
*((address*) addr_of_word(pc_off)) = pc;
}
intptr_t* unextended_sp() const {
return (intptr_t *) value_of_word(unextended_sp_off);
}

View File

@ -30,28 +30,12 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
/* be present there) */
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must */
/* be present there) */
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_INT_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */
/* be present there) */
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // CPU_ZERO_VM_VMSTRUCTS_ZERO_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -243,29 +243,32 @@ void os::Bsd::initialize_system_info() {
int mib[2];
size_t len;
int cpu_val;
u_long mem_val;
julong mem_val;
/* get processors count via hw.ncpus sysctl */
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
len = sizeof(cpu_val);
if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
assert(len == sizeof(cpu_val), "unexpected data size");
set_processor_count(cpu_val);
}
else {
set_processor_count(1); // fallback
}
/* get physical memory via hw.usermem sysctl (hw.usermem is used
* instead of hw.physmem because we need size of allocatable memory
/* get physical memory via hw.memsize sysctl (hw.memsize is used
* since it returns a 64 bit value)
*/
mib[0] = CTL_HW;
mib[1] = HW_USERMEM;
mib[1] = HW_MEMSIZE;
len = sizeof(mem_val);
if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1)
if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
assert(len == sizeof(mem_val), "unexpected data size");
_physical_memory = mem_val;
else
} else {
_physical_memory = 256*1024*1024; // fallback (XXXBSD?)
}
#ifdef __OpenBSD__
{
@ -4091,11 +4094,12 @@ void os::PlatformEvent::park() { // AKA "down()"
}
-- _nParked ;
// In theory we could move the ST of 0 into _Event past the unlock(),
// but then we'd need a MEMBAR after the ST.
_Event = 0 ;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee (_Event >= 0, "invariant") ;
}
@ -4158,40 +4162,44 @@ int os::PlatformEvent::park(jlong millis) {
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
assert (_nParked == 0, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
return ret;
}
void os::PlatformEvent::unpark() {
int v, AnyWaiters ;
for (;;) {
v = _Event ;
if (v > 0) {
// The LD of _Event could have reordered or be satisfied
// by a read-aside from this processor's write buffer.
// To avoid problems execute a barrier and then
// ratify the value.
OrderAccess::fence() ;
if (_Event == v) return ;
continue ;
}
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
// that it will take two back-to-back park() calls for the owning
// thread to block. This has the benefit of forcing a spurious return
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
if (Atomic::xchg(1, &_Event) >= 0) return;
// Wait for the thread associated with the event to vacate
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
int AnyWaiters = _nParked;
assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
AnyWaiters = 0;
pthread_cond_signal(_cond);
}
if (v < 0) {
// Wait for the thread associated with the event to vacate
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
AnyWaiters = _nParked ;
assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
AnyWaiters = 0 ;
pthread_cond_signal (_cond);
}
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
// Note that we signal() _after dropping the lock for "immortal" Events.
@ -4277,13 +4285,14 @@ static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
}
void Parker::park(bool isAbsolute, jlong time) {
// Ideally we'd do something useful while spinning, such
// as calling unpackTime().
// Optional fast-path check:
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0 ;
OrderAccess::fence();
return ;
}
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
if (Atomic::xchg(0, &_counter) > 0) return;
Thread* thread = Thread::current();
assert(thread->is_Java_thread(), "Must be JavaThread");
@ -4324,6 +4333,8 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
return;
}
@ -4360,12 +4371,14 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0 ;
status = pthread_mutex_unlock(_mutex) ;
assert_status(status == 0, status, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
// If externally suspended while waiting, re-suspend
if (jt->handle_special_suspend_equivalent_condition()) {
jt->java_suspend_self();
}
OrderAccess::fence();
}
void Parker::unpark() {
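The unpark() rewrite above replaces the CAS retry loop with a single swap, relying on the documented _Event transitions (0 :=> 1, 1 :=> 1, -1 :=> wake the parked thread), and the added fence() calls keep the lock-free fast path and the mutex-protected slow path consistent. A condensed stand-alone C++ sketch of that state machine, with C++11 primitives in place of the pthread calls (an illustration, not the HotSpot implementation):

#include <atomic>
#include <mutex>
#include <condition_variable>

// _event: 1 = signalled, 0 = neutral, -1 = the owning thread is parked.
// Only the single thread that owns the event ever calls park(), as with
// HotSpot's PlatformEvent; std::atomic's default ordering stands in for
// the OrderAccess::fence() calls in the real code.
class EventSketch {
  std::atomic<int>        _event{0};
  std::mutex              _mutex;
  std::condition_variable _cond;

 public:
  void park() {
    int v = _event.fetch_sub(1);         // 1 -> 0 consumes the permit,
    if (v == 1) return;                  // 0 -> -1 announces a waiter
    std::unique_lock<std::mutex> lock(_mutex);
    _cond.wait(lock, [this] { return _event.load() >= 0; });
    _event.store(0);                     // back to neutral; this is a binary
  }                                      // latch, so stacked unparks collapse

  void unpark() {
    // 0 :=> 1, 1 :=> 1, -1 :=> 1 plus a wakeup for the parked thread.
    if (_event.exchange(1) >= 0) return; // nobody was parked
    std::lock_guard<std::mutex> lock(_mutex);
    _cond.notify_one();                  // signal once the new state is visible
  }
};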

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,14 +59,6 @@ inline const char* os::path_separator() {
return ":";
}
inline const char* os::jlong_format_specifier() {
return "%lld";
}
inline const char* os::julong_format_specifier() {
return "%llu";
}
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -5001,11 +5001,12 @@ void os::PlatformEvent::park() { // AKA "down()"
}
-- _nParked ;
// In theory we could move the ST of 0 into _Event past the unlock(),
// but then we'd need a MEMBAR after the ST.
_Event = 0 ;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee (_Event >= 0, "invariant") ;
}
@ -5068,40 +5069,44 @@ int os::PlatformEvent::park(jlong millis) {
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
assert (_nParked == 0, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
return ret;
}
void os::PlatformEvent::unpark() {
int v, AnyWaiters ;
for (;;) {
v = _Event ;
if (v > 0) {
// The LD of _Event could have reordered or be satisfied
// by a read-aside from this processor's write buffer.
// To avoid problems execute a barrier and then
// ratify the value.
OrderAccess::fence() ;
if (_Event == v) return ;
continue ;
}
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
// that it will take two back-to-back park() calls for the owning
// thread to block. This has the benefit of forcing a spurious return
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
if (Atomic::xchg(1, &_Event) >= 0) return;
// Wait for the thread associated with the event to vacate
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
int AnyWaiters = _nParked;
assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
AnyWaiters = 0;
pthread_cond_signal(_cond);
}
if (v < 0) {
// Wait for the thread associated with the event to vacate
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
AnyWaiters = _nParked ;
assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
AnyWaiters = 0 ;
pthread_cond_signal (_cond);
}
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
// Note that we signal() _after dropping the lock for "immortal" Events.
@ -5187,13 +5192,14 @@ static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
}
void Parker::park(bool isAbsolute, jlong time) {
// Ideally we'd do something useful while spinning, such
// as calling unpackTime().
// Optional fast-path check:
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0 ;
OrderAccess::fence();
return ;
}
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
if (Atomic::xchg(0, &_counter) > 0) return;
Thread* thread = Thread::current();
assert(thread->is_Java_thread(), "Must be JavaThread");
@ -5234,6 +5240,8 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
return;
}
@ -5270,12 +5278,14 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0 ;
status = pthread_mutex_unlock(_mutex) ;
assert_status(status == 0, status, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
// If externally suspended while waiting, re-suspend
if (jt->handle_special_suspend_equivalent_condition()) {
jt->java_suspend_self();
}
OrderAccess::fence();
}
void Parker::unpark() {
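The Parker::park() change above (made identically for BSD, Linux and Solaris) swaps the permit out with Atomic::xchg() instead of a separate load and store; as the new comment says, the code relies on the full-barrier semantics of xchg for this lock-free read of _counter. A minimal permit-style parker along the same lines, with std::atomic and std::condition_variable standing in for the pthread calls (a sketch, not HotSpot's Parker):

#include <atomic>
#include <mutex>
#include <condition_variable>

// One permit, in the style of java.util.concurrent.locks.LockSupport:
// _counter is 1 when a permit is available, 0 otherwise.
class ParkerSketch {
  std::atomic<int>        _counter{0};
  std::mutex              _mutex;
  std::condition_variable _cond;

 public:
  void park() {
    // Fast path: atomically swap the permit out. The old code's separate
    // "if (_counter > 0) { _counter = 0; return; }" is now a single atomic
    // read-and-clear step.
    if (_counter.exchange(0) > 0) return;

    std::unique_lock<std::mutex> lock(_mutex);
    _cond.wait(lock, [this] { return _counter.load() > 0; });
    _counter.store(0);                  // consume the permit we were given
  }

  void unpark() {
    std::lock_guard<std::mutex> lock(_mutex);
    _counter.store(1);                  // publish the permit, then wake the parker
    _cond.notify_one();
  }
};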

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,14 +68,6 @@ inline const char* os::path_separator() {
return ":";
}
inline const char* os::jlong_format_specifier() {
return "%lld";
}
inline const char* os::julong_format_specifier() {
return "%llu";
}
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1876,11 +1876,6 @@ void SplashFreeLibrary() {
}
}
const char *
jlong_format_specifier() {
return "%lld";
}
/*
* Block current thread and continue execution in a new thread
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,12 @@
#define Counter2Micros(counts) (1)
#endif /* HAVE_GETHRTIME */
#ifdef _LP64
#define JLONG_FORMAT "%ld"
#else
#define JLONG_FORMAT "%lld"
#endif
/*
* Function prototypes.
*/
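The launcher now defines a per-platform JLONG_FORMAT macro ("%ld" for LP64 Unix builds, "%lld" otherwise, and "%I64d" on Windows further down) in place of the jlong_format_specifier() helpers this commit deletes. Because adjacent string literals concatenate, the macro slots straight into a format string; a small usage sketch, assuming a jlong typedef as in jni_md.h for an LP64 Unix build:

#include <cstdio>

// Assumed to mirror the launcher's definitions for an LP64 Unix build.
typedef long jlong;
#define JLONG_FORMAT "%ld"

int main() {
  jlong elapsed = 1234567890123LL;
  // Adjacent string literals concatenate, so every platform gets the right
  // conversion specifier for jlong at compile time.
  std::printf("elapsed: " JLONG_FORMAT " ms\n", elapsed);
  return 0;
}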

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -6014,6 +6014,9 @@ void os::PlatformEvent::park() { // AKA: down()
_Event = 0 ;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
}
@ -6055,51 +6058,43 @@ int os::PlatformEvent::park(jlong millis) {
_Event = 0 ;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
return ret;
}
void os::PlatformEvent::unpark() {
int v, AnyWaiters;
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
// that it will take two back-to-back park() calls for the owning
// thread to block. This has the benefit of forcing a spurious return
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
// Increment _Event.
// Another acceptable implementation would be to simply swap 1
// into _Event:
// if (Swap (&_Event, 1) < 0) {
// mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
// if (AnyWaiters) cond_signal (_cond) ;
// }
for (;;) {
v = _Event ;
if (v > 0) {
// The LD of _Event could have reordered or be satisfied
// by a read-aside from this processor's write buffer.
// To avoid problems execute a barrier and then
// ratify the value. A degenerate CAS() would also work.
// Viz., CAS (v+0, &_Event, v) == v).
OrderAccess::fence() ;
if (_Event == v) return ;
continue ;
}
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
}
if (Atomic::xchg(1, &_Event) >= 0) return;
// If the thread associated with the event was parked, wake it.
if (v < 0) {
int status ;
// Wait for the thread assoc with the PlatformEvent to vacate.
status = os::Solaris::mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
AnyWaiters = _nParked ;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
if (AnyWaiters != 0) {
// We intentionally signal *after* dropping the lock
// to avoid a common class of futile wakeups.
status = os::Solaris::cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
// Wait for the thread assoc with the PlatformEvent to vacate.
int status = os::Solaris::mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
int AnyWaiters = _nParked;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
if (AnyWaiters != 0) {
// We intentionally signal *after* dropping the lock
// to avoid a common class of futile wakeups.
status = os::Solaris::cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
}
@ -6177,14 +6172,14 @@ static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
}
void Parker::park(bool isAbsolute, jlong time) {
// Ideally we'd do something useful while spinning, such
// as calling unpackTime().
// Optional fast-path check:
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0 ;
OrderAccess::fence();
return ;
}
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
if (Atomic::xchg(0, &_counter) > 0) return;
// Optional fast-exit: Check interrupt before trying to wait
Thread* thread = Thread::current();
@ -6226,6 +6221,8 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
return;
}
@ -6267,12 +6264,14 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0 ;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock") ;
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
// If externally suspended while waiting, re-suspend
if (jt->handle_special_suspend_equivalent_condition()) {
jt->java_suspend_self();
}
OrderAccess::fence();
}
void Parker::unpark() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,9 +50,6 @@ inline const char* os::file_separator() { return "/"; }
inline const char* os::line_separator() { return "\n"; }
inline const char* os::path_separator() { return ":"; }
inline const char* os::jlong_format_specifier() { return "%lld"; }
inline const char* os::julong_format_specifier() { return "%llu"; }
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1323,11 +1323,6 @@ void SplashFreeLibrary() {
}
}
const char *
jlong_format_specifier() {
return "%I64d";
}
/*
* Block current thread and continue execution in a new thread
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,6 +69,8 @@ extern jlong Counter2Micros(jlong counts);
extern int _main(int argc, char **argv);
#endif
#define JLONG_FORMAT "%I64d"
/*
* Function prototypes.
*/

View File

@ -49,7 +49,7 @@ void WindowsDecoder::initialize() {
pfn_SymSetOptions _pfnSymSetOptions = (pfn_SymSetOptions)::GetProcAddress(handle, "SymSetOptions");
pfn_SymInitialize _pfnSymInitialize = (pfn_SymInitialize)::GetProcAddress(handle, "SymInitialize");
_pfnSymGetSymFromAddr64 = (pfn_SymGetSymFromAddr64)::GetProcAddress(handle, "SymGetSymFromAddr64");
_pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)GetProcAddress(handle, "UnDecorateSymbolName");
_pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
_pfnSymGetSymFromAddr64 = NULL;
@ -60,8 +60,9 @@ void WindowsDecoder::initialize() {
return;
}
_pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS);
if (!_pfnSymInitialize(GetCurrentProcess(), NULL, TRUE)) {
HANDLE hProcess = ::GetCurrentProcess();
_pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
_pfnSymGetSymFromAddr64 = NULL;
_pfnUndecorateSymbolName = NULL;
::FreeLibrary(handle);
@ -70,6 +71,77 @@ void WindowsDecoder::initialize() {
return;
}
// set pdb search paths
pfn_SymSetSearchPath _pfn_SymSetSearchPath =
(pfn_SymSetSearchPath)::GetProcAddress(handle, "SymSetSearchPath");
pfn_SymGetSearchPath _pfn_SymGetSearchPath =
(pfn_SymGetSearchPath)::GetProcAddress(handle, "SymGetSearchPath");
if (_pfn_SymSetSearchPath != NULL && _pfn_SymGetSearchPath != NULL) {
char paths[MAX_PATH];
int len = sizeof(paths);
if (!_pfn_SymGetSearchPath(hProcess, paths, len)) {
paths[0] = '\0';
} else {
// available spaces in path buffer
len -= (int)strlen(paths);
}
char tmp_path[MAX_PATH];
DWORD dwSize;
HMODULE hJVM = ::GetModuleHandle("jvm.dll");
tmp_path[0] = '\0';
// append the path where jvm.dll is located
if (hJVM != NULL && (dwSize = ::GetModuleFileName(hJVM, tmp_path, sizeof(tmp_path))) > 0) {
while (dwSize > 0 && tmp_path[dwSize] != '\\') {
dwSize --;
}
tmp_path[dwSize] = '\0';
if (dwSize > 0 && len > (int)dwSize + 1) {
strncat(paths, os::path_separator(), 1);
strncat(paths, tmp_path, dwSize);
len -= dwSize + 1;
}
}
// append $JRE/bin. Arguments::get_java_home actually returns $JRE
// path
char *p = Arguments::get_java_home();
assert(p != NULL, "empty java home");
size_t java_home_len = strlen(p);
if (len > (int)java_home_len + 5) {
strncat(paths, os::path_separator(), 1);
strncat(paths, p, java_home_len);
strncat(paths, "\\bin", 4);
len -= (int)(java_home_len + 5);
}
// append $JDK/bin path if it exists
assert(java_home_len < MAX_PATH, "Invalid path length");
// assume $JRE is under $JDK, construct $JDK/bin path and
// see if it exists or not
if (strncmp(&p[java_home_len - 3], "jre", 3) == 0) {
strncpy(tmp_path, p, java_home_len - 3);
tmp_path[java_home_len - 3] = '\0';
strncat(tmp_path, "bin", 3);
// if the directory exists
DWORD dwAttrib = GetFileAttributes(tmp_path);
if (dwAttrib != INVALID_FILE_ATTRIBUTES &&
(dwAttrib & FILE_ATTRIBUTE_DIRECTORY)) {
// tmp_path should have the same length as java_home_len, since we only
// replaced 'jre' with 'bin'
if (len > (int)java_home_len + 1) {
strncat(paths, os::path_separator(), 1);
strncat(paths, tmp_path, java_home_len);
}
}
}
_pfn_SymSetSearchPath(hProcess, paths);
}
// find out if jvm.dll contains private symbols, by decoding
// current function and comparing the result
address addr = (address)Decoder::demangle;

View File

@ -35,6 +35,8 @@ typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
typedef BOOL (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
typedef BOOL (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
typedef BOOL (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
typedef BOOL (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
class WindowsDecoder : public AbstractDecoder {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1874,8 +1874,22 @@ static BOOL WINAPI consoleHandler(DWORD event) {
}
return TRUE;
break;
case CTRL_LOGOFF_EVENT: {
// Don't terminate JVM if it is running in a non-interactive session,
// such as a service process.
USEROBJECTFLAGS flags;
HANDLE handle = GetProcessWindowStation();
if (handle != NULL &&
GetUserObjectInformation(handle, UOI_FLAGS, &flags,
sizeof( USEROBJECTFLAGS), NULL)) {
// If it is a non-interactive session, let the next handler deal
// with it.
if ((flags.dwFlags & WSF_VISIBLE) == 0) {
return FALSE;
}
}
}
case CTRL_CLOSE_EVENT:
case CTRL_LOGOFF_EVENT:
case CTRL_SHUTDOWN_EVENT:
os::signal_raise(SIGTERM);
return TRUE;
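The CTRL_LOGOFF_EVENT branch above only lets a logoff terminate the VM when the process owns a visible (interactive) window station; in a service-style session it returns FALSE so the next handler runs. The same check, factored into a hypothetical helper using the Win32 calls from the diff (a sketch, not the HotSpot code):

#include <windows.h>

// Returns true when the process runs in an interactive (visible) window
// station; falls back to "interactive" if the query fails, so termination
// stays the default unless the session is clearly non-interactive.
static bool is_interactive_session() {
  USEROBJECTFLAGS flags;
  HWINSTA station = GetProcessWindowStation();
  if (station != NULL &&
      GetUserObjectInformation(station, UOI_FLAGS, &flags,
                               sizeof(USEROBJECTFLAGS), NULL)) {
    return (flags.dwFlags & WSF_VISIBLE) != 0;
  }
  return true;
}

int main() {
  return is_interactive_session() ? 0 : 1;
}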
@ -2946,7 +2960,7 @@ char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
}
if( Verbose && PrintMiscellaneous ) {
reserveTimer.stop();
tty->print_cr("reserve_memory of %Ix bytes took %ld ms (%ld ticks)", bytes,
tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
reserveTimer.milliseconds(), reserveTimer.ticks());
}
}
@ -4305,7 +4319,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
if (hFile == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CreateFile() failed: GetLastError->%ld.");
tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
}
return NULL;
}
@ -4355,7 +4369,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
if (hMap == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.");
tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
}
CloseHandle(hFile);
return NULL;
@ -4565,6 +4579,7 @@ int os::PlatformEvent::park (jlong Millis) {
}
v = _Event ;
_Event = 0 ;
// see comment at end of os::PlatformEvent::park() below:
OrderAccess::fence() ;
// If we encounter a nearly simultaneous timeout expiry and unpark()
// we return OS_OK indicating we awoke via unpark().
@ -4602,25 +4617,25 @@ void os::PlatformEvent::park () {
void os::PlatformEvent::unpark() {
guarantee (_ParkHandle != NULL, "Invariant") ;
int v ;
for (;;) {
v = _Event ; // Increment _Event if it's < 1.
if (v > 0) {
// If it's already signaled just return.
// The LD of _Event could have reordered or be satisfied
// by a read-aside from this processor's write buffer.
// To avoid problems execute a barrier and then
// ratify the value. A degenerate CAS() would also work.
// Viz., CAS (v+0, &_Event, v) == v).
OrderAccess::fence() ;
if (_Event == v) return ;
continue ;
}
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
}
if (v < 0) {
::SetEvent (_ParkHandle) ;
}
// Transitions for _Event:
// 0 :=> 1
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
// that it will take two back-to-back park() calls for the owning
// thread to block. This has the benefit of forcing a spurious return
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
if (Atomic::xchg(1, &_Event) >= 0) return;
::SetEvent(_ParkHandle);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,9 +38,6 @@ inline const char* os::line_separator() { return "\r\n"; }
inline const char* os::path_separator() { return ";"; }
inline const char* os::dll_file_extension() { return ".dll"; }
inline const char* os::jlong_format_specifier() { return "%I64d"; }
inline const char* os::julong_format_specifier() { return "%I64u"; }
inline const int os::default_file_open_flags() { return O_BINARY | O_NOINHERIT;}
// File names are case-insensitive on windows only

View File

@ -29,37 +29,26 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
nonstatic_field(OSThread, _pthread_id, pthread_t) \
/* This must be the last entry, and must be present */ \
last_entry()
nonstatic_field(OSThread, _pthread_id, pthread_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Posix Thread IDs */ \
/**********************/ \
\
declare_unsigned_integer_type(OSThread::thread_id_t) \
declare_unsigned_integer_type(pthread_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(pthread_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_BSD_X86_VM_VMSTRUCTS_BSD_X86_HPP

View File

@ -30,21 +30,13 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_BSD_ZERO_VM_VMSTRUCTS_BSD_ZERO_HPP

View File

@ -29,7 +29,7 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
@ -37,38 +37,27 @@
\
nonstatic_field(JavaThread, _base_of_stack_pointer, intptr_t*) \
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
nonstatic_field(OSThread, _pthread_id, pthread_t) \
/* This must be the last entry, and must be present */ \
last_entry()
nonstatic_field(OSThread, _pthread_id, pthread_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* POSIX Thread IDs */ \
/**********************/ \
\
declare_integer_type(OSThread::thread_id_t) \
declare_unsigned_integer_type(pthread_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(pthread_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
\
/************************/ \
/* JavaThread constants */ \
/************************/ \
\
declare_constant(JavaFrameAnchor::flushed) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_constant(JavaFrameAnchor::flushed)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_LINUX_SPARC_VM_VMSTRUCTS_LINUX_SPARC_HPP

View File

@ -29,37 +29,26 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
nonstatic_field(OSThread, _pthread_id, pthread_t) \
/* This must be the last entry, and must be present */ \
last_entry()
nonstatic_field(OSThread, _pthread_id, pthread_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Posix Thread IDs */ \
/**********************/ \
\
declare_integer_type(OSThread::thread_id_t) \
declare_unsigned_integer_type(pthread_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(pthread_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_LINUX_X86_VM_VMSTRUCTS_LINUX_X86_HPP

View File

@ -30,21 +30,12 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_LINUX_ZERO_VM_VMSTRUCTS_LINUX_ZERO_HPP

View File

@ -29,44 +29,32 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
\
nonstatic_field(JavaThread, _base_of_stack_pointer, intptr_t*) \
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
/* This must be the last entry, and must be present */ \
last_entry()
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Solaris Thread IDs */ \
/**********************/ \
\
declare_unsigned_integer_type(OSThread::thread_id_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(OSThread::thread_id_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
\
/************************/ \
/* JavaThread constants */ \
/************************/ \
\
declare_constant(JavaFrameAnchor::flushed) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_constant(JavaFrameAnchor::flushed)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_SOLARIS_SPARC_VM_VMSTRUCTS_SOLARIS_SPARC_HPP

View File

@ -29,36 +29,24 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
\
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Solaris Thread IDs */ \
/**********************/ \
\
declare_unsigned_integer_type(OSThread::thread_id_t) \
\
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(OSThread::thread_id_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_SOLARIS_X86_VM_VMSTRUCTS_SOLARIS_X86_HPP

View File

@ -29,32 +29,21 @@
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
\
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
unchecked_nonstatic_field(OSThread, _thread_handle, sizeof(HANDLE)) /* NOTE: no type */ \
\
/* This must be the last entry, and must be present */ \
last_entry()
unchecked_nonstatic_field(OSThread, _thread_handle, sizeof(HANDLE)) /* NOTE: no type */
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
declare_unsigned_integer_type(OSThread::thread_id_t) \
/* This must be the last entry, and must be present */ \
last_entry()
declare_unsigned_integer_type(OSThread::thread_id_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
\
/* This must be the last entry, and must be present */ \
last_entry()
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_WINDOWS_X86_VM_VMSTRUCTS_WINDOWS_X86_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -808,7 +808,7 @@ CheckJvmType(int *pargc, char ***argv, jboolean speculative) {
static int
parse_stack_size(const char *s, jlong *result) {
jlong n = 0;
int args_read = sscanf(s, jlong_format_specifier(), &n);
int args_read = sscanf(s, JLONG_FORMAT, &n);
if (args_read != 1) {
return 0;
}
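
The hunk above replaces the launcher's jlong_format_specifier() call with the JLONG_FORMAT macro. As a rough, standalone illustration of why a single platform-selected format macro is handy for both sscanf and printf, consider the sketch below; MY_JLONG_FORMAT and the sample input are invented for illustration and are not part of the JDK sources.

#include <cstdio>

// Hypothetical stand-in for a platform-selected 64-bit format macro.
#if defined(_WIN32)
  #define MY_JLONG_FORMAT "%I64d"
#else
  #define MY_JLONG_FORMAT "%lld"
#endif

int main() {
  long long stack_size = 0;
  // The same string literal drives both parsing and printing on either platform.
  if (std::sscanf("8388608", MY_JLONG_FORMAT, &stack_size) == 1) {
    std::printf("parsed stack size: " MY_JLONG_FORMAT " bytes\n", stack_size);
  }
  return 0;
}
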

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,6 @@ void ReportExceptionDescription(JNIEnv * env);
jboolean RemovableMachineDependentOption(char * option);
void PrintMachineDependentOptions();
const char *jlong_format_specifier();
/*
* Block current thread and continue execution in new thread
*/

View File

@ -862,8 +862,10 @@ uint InstructForm::oper_input_base(FormDict &globals) {
( strcmp(_matrule->_rChild->_opType,"AryEq" )==0 ||
strcmp(_matrule->_rChild->_opType,"StrComp" )==0 ||
strcmp(_matrule->_rChild->_opType,"StrEquals" )==0 ||
strcmp(_matrule->_rChild->_opType,"StrIndexOf")==0 )) {
strcmp(_matrule->_rChild->_opType,"StrIndexOf")==0 ||
strcmp(_matrule->_rChild->_opType,"EncodeISOArray")==0)) {
// String.(compareTo/equals/indexOf) and Arrays.equals
// and sun.nio.cs.iso8859_1$Encoder.EncodeISOArray
// take 1 control edge and 1 memory edge.
return 2;
}

View File

@ -3223,7 +3223,12 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Co
}
if (try_inline_full(callee, holder_known, bc, receiver))
return true;
print_inlining(callee, _inline_bailout_msg, /*success*/ false);
// The entire compilation could fail during the try_inline_full call.
// In that case printing the inlining decision info is useless.
if (!bailed_out())
print_inlining(callee, _inline_bailout_msg, /*success*/ false);
return false;
}
@ -3753,7 +3758,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
push_scope(callee, cont);
// the BlockListBuilder for the callee could have bailed out
CHECK_BAILOUT_(false);
if (bailed_out())
return false;
// Temporarily set up bytecode stream so we can append instructions
// (only using the bci of this stream)
@ -3819,7 +3825,8 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
iterate_all_blocks(callee_start_block == NULL);
// If we bailed out during parsing, return immediately (this is bad news)
if (bailed_out()) return false;
if (bailed_out())
return false;
// iterate_all_blocks theoretically traverses in random order; in
// practice, we have only traversed the continuation if we are
@ -3828,9 +3835,6 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
!continuation()->is_set(BlockBegin::was_visited_flag),
"continuation should not have been parsed yet if we created it");
// If we bailed out during parsing, return immediately (this is bad news)
CHECK_BAILOUT_(false);
// At this point we are almost ready to return and resume parsing of
// the caller back in the GraphBuilder. The only thing we want to do
// first is an optimization: during parsing of the callee we
@ -4171,7 +4175,10 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
else
log->inline_success("receiver is statically known");
} else {
log->inline_fail(msg);
if (msg != NULL)
log->inline_fail(msg);
else
log->inline_fail("reason unknown");
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -360,7 +360,7 @@ void InstructionPrinter::do_Constant(Constant* x) {
ValueType* t = x->type();
switch (t->tag()) {
case intTag : output()->print("%d" , t->as_IntConstant ()->value()); break;
case longTag : output()->print(os::jlong_format_specifier(), t->as_LongConstant()->value()); output()->print("L"); break;
case longTag : output()->print(JLONG_FORMAT, t->as_LongConstant()->value()); output()->print("L"); break;
case floatTag : output()->print("%g" , t->as_FloatConstant ()->value()); break;
case doubleTag : output()->print("%gD" , t->as_DoubleConstant()->value()); break;
case objectTag : print_object(x); break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1563,7 +1563,7 @@ void LIR_Const::print_value_on(outputStream* out) const {
switch (type()) {
case T_ADDRESS:out->print("address:%d",as_jint()); break;
case T_INT: out->print("int:%d", as_jint()); break;
case T_LONG: out->print("lng:%lld", as_jlong()); break;
case T_LONG: out->print("lng:" JLONG_FORMAT, as_jlong()); break;
case T_FLOAT: out->print("flt:%f", as_jfloat()); break;
case T_DOUBLE: out->print("dbl:%f", as_jdouble()); break;
case T_OBJECT: out->print("obj:0x%x", as_jobject()); break;

View File

@ -147,7 +147,7 @@
"Inline methods containing exception handlers " \
"(NOTE: does not work with current backend)") \
\
develop(bool, InlineSynchronizedMethods, true, \
product(bool, InlineSynchronizedMethods, true, \
"Inline synchronized methods") \
\
develop(bool, InlineNIOCheckIndex, true, \

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
/* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -645,7 +645,7 @@ class CompileReplay : public StackObj {
java_mirror->bool_field_put(fd.offset(), value);
} else if (strcmp(field_signature, "J") == 0) {
jlong value;
if (sscanf(string_value, INT64_FORMAT, &value) != 1) {
if (sscanf(string_value, JLONG_FORMAT, &value) != 1) {
fprintf(stderr, "Error parsing long: %s\n", string_value);
return;
}

View File

@ -59,6 +59,19 @@ bool ciType::is_subtype_of(ciType* type) {
return false;
}
// ------------------------------------------------------------------
// ciType::name
//
// Return the name of this type
const char* ciType::name() {
if (is_primitive_type()) {
return type2name(basic_type());
} else {
assert(is_klass(), "must be");
return as_klass()->name()->as_utf8();
}
}
// ------------------------------------------------------------------
// ciType::print_impl
//
@ -73,7 +86,8 @@ void ciType::print_impl(outputStream* st) {
//
// Print the name of this type
void ciType::print_name_on(outputStream* st) {
st->print(type2name(basic_type()));
ResourceMark rm;
st->print(name());
}

View File

@ -77,6 +77,7 @@ public:
bool is_type() const { return true; }
bool is_classless() const { return is_primitive_type(); }
const char* name();
virtual void print_name_on(outputStream* st);
void print_name() {
print_name_on(tty);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,6 +59,7 @@
#include "services/classLoadingService.hpp"
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@ -970,6 +971,12 @@ void ClassFileParser::parse_field_attributes(ClassLoaderData* loader_data,
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(loader_data,
runtime_visible_annotations,
runtime_visible_annotations_length,
cp,
parsed_annotations,
CHECK);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
runtime_invisible_annotations_length = attribute_length;
@ -1216,19 +1223,16 @@ Array<u2>* ClassFileParser::parse_fields(ClassLoaderData* loader_data,
field->initialize(access_flags.as_short(),
name_index,
signature_index,
constantvalue_index,
0);
if (parsed_annotations.has_any_annotations())
parsed_annotations.apply_to(field);
constantvalue_index);
BasicType type = cp->basic_type_for_signature_at(signature_index);
// Remember how many oops we encountered and compute allocation type
FieldAllocationType atype = fac->update(is_static, type);
field->set_allocation_type(atype);
// The correct offset is computed later (all oop fields will be located together)
// We temporarily store the allocation type in the offset field
field->set_offset(atype);
// After field is initialized with type, we can augment it with aux info
if (parsed_annotations.has_any_annotations())
parsed_annotations.apply_to(field);
}
int index = length;
@ -1259,17 +1263,13 @@ Array<u2>* ClassFileParser::parse_fields(ClassLoaderData* loader_data,
field->initialize(JVM_ACC_FIELD_INTERNAL,
injected[n].name_index,
injected[n].signature_index,
0,
0);
BasicType type = FieldType::basic_type(injected[n].signature());
// Remember how many oops we encountered and compute allocation type
FieldAllocationType atype = fac->update(false, type);
// The correct offset is computed later (all oop fields will be located together)
// We temporarily store the allocation type in the offset field
field->set_offset(atype);
field->set_allocation_type(atype);
index++;
}
}
@ -1735,7 +1735,8 @@ int ClassFileParser::skip_annotation_value(u1* buffer, int limit, int index) {
}
// Sift through annotations, looking for those significant to the VM:
void ClassFileParser::parse_annotations(u1* buffer, int limit,
void ClassFileParser::parse_annotations(ClassLoaderData* loader_data,
u1* buffer, int limit,
constantPoolHandle cp,
ClassFileParser::AnnotationCollector* coll,
TRAPS) {
@ -1752,9 +1753,12 @@ void ClassFileParser::parse_annotations(u1* buffer, int limit,
e_type_off = 7, // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
e_con_off = 9, // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
e_size = 11, // end of 'e' annotation
c_tag_val = 'c',
c_con_off = 7, // utf8 payload, such as 'I' or 'Ljava/lang/String;'
c_tag_val = 'c', // payload is type
c_con_off = 7, // utf8 payload, such as 'I'
c_size = 9, // end of 'c' annotation
s_tag_val = 's', // payload is String
s_con_off = 7, // utf8 payload, such as 'Ljava/lang/String;'
s_size = 9,
min_size = 6 // smallest possible size (zero members)
};
while ((--nann) >= 0 && (index-2 + min_size <= limit)) {
@ -1773,57 +1777,63 @@ void ClassFileParser::parse_annotations(u1* buffer, int limit,
}
// Here is where parsing particular annotations will take place.
AnnotationCollector::ID id = coll->annotation_index(aname);
AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
if (id == AnnotationCollector::_unknown) continue;
coll->set_annotation(id);
// If there are no values, just set the bit and move on:
if (count == 0) continue;
// For the record, here is how annotation payloads can be collected.
// Suppose we want to capture @Retention.value. Here is how:
//if (id == AnnotationCollector::_class_Retention) {
// Symbol* payload = NULL;
// if (count == 1
// && e_size == (index0 - index) // match size
// && e_tag_val == *(abase + tag_off)
// && (check_symbol_at(cp, Bytes::get_Java_u2(abase + e_type_off))
// == vmSymbols::RetentionPolicy_signature())
// && member == vmSymbols::value_name()) {
// payload = check_symbol_at(cp, Bytes::get_Java_u2(abase + e_con_off));
// }
// check_property(payload != NULL,
// "Invalid @Retention annotation at offset %u in class file %s",
// index0, CHECK);
// if (payload != NULL) {
// payload->increment_refcount();
// coll->_class_RetentionPolicy = payload;
// }
//}
if (id == AnnotationCollector::_sun_misc_Contended) {
if (count == 1
&& s_size == (index - index0) // match size
&& s_tag_val == *(abase + tag_off)
&& member == vmSymbols::value_name()) {
u2 group_index = Bytes::get_Java_u2(abase + s_con_off);
coll->set_contended_group(group_index);
} else {
coll->set_contended_group(0); // default contended group
}
coll->set_contended(true);
} else {
coll->set_contended(false);
}
}
}
ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Symbol* name) {
ClassFileParser::AnnotationCollector::ID
ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
Symbol* name) {
vmSymbols::SID sid = vmSymbols::find_sid(name);
// Privileged code can use all annotations. Other code silently drops some.
bool privileged = loader_data->is_the_null_class_loader_data() ||
loader_data->is_anonymous();
switch (sid) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_ForceInline;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_DontInline_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_DontInline;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Compiled;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Hidden;
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
if (_location != _in_field && _location != _in_class) break; // only allow for fields and classes
if (!EnableContended || (RestrictContended && !privileged)) break; // honor privileges
return _sun_misc_Contended;
default: break;
}
return AnnotationCollector::_unknown;
}
void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
fatal("no field annotations yet");
if (is_contended())
f->set_contended_group(contended_group());
}
void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
@ -1838,7 +1848,7 @@ void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
}
void ClassFileParser::ClassAnnotationCollector::apply_to(instanceKlassHandle k) {
fatal("no class annotations yet");
k->set_is_contended(is_contended());
}
@ -2148,9 +2158,21 @@ methodHandle ClassFileParser::parse_method(ClassLoaderData* loader_data,
cp, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_method_parameters()) {
method_parameters_length = cfs->get_u1_fast();
// Track the actual size (note: this is written for clarity; a
// decent compiler will CSE and constant-fold this into a single
// expression)
u2 actual_size = 1;
method_parameters_data = cfs->get_u1_buffer();
actual_size += 2 * method_parameters_length;
cfs->skip_u2_fast(method_parameters_length);
actual_size += 4 * method_parameters_length;
cfs->skip_u4_fast(method_parameters_length);
// Enforce attribute length
if (method_attribute_length != actual_size) {
classfile_parse_error(
"Invalid MethodParameters method attribute length %u in class file %s",
method_attribute_length, CHECK_(nullHandle));
}
// ignore this attribute if it cannot be reflected
if (!SystemDictionary::Parameter_klass_loaded())
method_parameters_length = 0;
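
As a quick check of the length bookkeeping above: the hunk reads a u2 name index and a u4 flags word per entry, so an attribute with N entries should occupy 1 + 6*N bytes, counting the one-byte entry count. A minimal standalone sketch of that arithmetic (not parser code):

#include <cstdio>

// Expected MethodParameters attribute length for a given entry count, as read
// by the hunk above: 1 byte for the count, then 2 (name index) + 4 (flags)
// bytes per entry.
static unsigned expected_method_parameters_length(unsigned count) {
  return 1 + 6 * count;
}

int main() {
  for (unsigned n = 0; n <= 3; n++) {
    std::printf("%u entries -> %u bytes\n", n, expected_method_parameters_length(n));
  }
  return 0;
}
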
@ -2181,7 +2203,8 @@ methodHandle ClassFileParser::parse_method(ClassLoaderData* loader_data,
runtime_visible_annotations_length = method_attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
parse_annotations(loader_data,
runtime_visible_annotations,
runtime_visible_annotations_length, cp, &parsed_annotations,
CHECK_(nullHandle));
cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
@ -2297,7 +2320,10 @@ methodHandle ClassFileParser::parse_method(ClassLoaderData* loader_data,
elem[i].name_cp_index =
Bytes::get_Java_u2(method_parameters_data);
method_parameters_data += 2;
elem[i].flags = Bytes::get_Java_u4(method_parameters_data);
u4 flags = Bytes::get_Java_u4(method_parameters_data);
// Keeping flags as a u4 in this structure caused an alignment fault on SPARC
elem[i].flags_lo = extract_low_short_from_int(flags);
elem[i].flags_hi = extract_high_short_from_int(flags);
method_parameters_data += 4;
}
}
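
The change above splits the u4 flags word into two u2 halves so that no unaligned 4-byte access is needed on strict-alignment hardware such as SPARC. Below is a standalone sketch of the split-and-reassemble idea; the helper names only echo extract_low/high_short_from_int and are not HotSpot's actual functions.

#include <cstdint>
#include <cstdio>

static uint16_t low_short(uint32_t x)  { return (uint16_t)(x & 0xFFFF); }
static uint16_t high_short(uint32_t x) { return (uint16_t)(x >> 16); }
static uint32_t rebuild_int(uint16_t lo, uint16_t hi) { return ((uint32_t)hi << 16) | lo; }

int main() {
  uint32_t flags = 0x00011010;            // arbitrary example value
  uint16_t lo = low_short(flags);         // stored in a naturally aligned u2 slot
  uint16_t hi = high_short(flags);        // stored in a second u2 slot
  std::printf("lo=0x%04x hi=0x%04x rebuilt=0x%08x\n", lo, hi, rebuild_int(lo, hi));
  return 0;
}
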
@ -2475,26 +2501,38 @@ Array<Method*>* ClassFileParser::parse_methods(ClassLoaderData* loader_data,
*has_default_methods = true;
}
methods->at_put(index, method());
if (*methods_annotations == NULL) {
*methods_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
if (method_annotations != NULL) {
if (*methods_annotations == NULL) {
*methods_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
}
(*methods_annotations)->at_put(index, method_annotations);
}
(*methods_annotations)->at_put(index, method_annotations);
if (*methods_parameter_annotations == NULL) {
*methods_parameter_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
if (method_parameter_annotations != NULL) {
if (*methods_parameter_annotations == NULL) {
*methods_parameter_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
}
(*methods_parameter_annotations)->at_put(index, method_parameter_annotations);
}
(*methods_parameter_annotations)->at_put(index, method_parameter_annotations);
if (*methods_default_annotations == NULL) {
*methods_default_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
if (method_default_annotations != NULL) {
if (*methods_default_annotations == NULL) {
*methods_default_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
}
(*methods_default_annotations)->at_put(index, method_default_annotations);
}
(*methods_default_annotations)->at_put(index, method_default_annotations);
if (*methods_type_annotations == NULL) {
*methods_type_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
if (method_type_annotations != NULL) {
if (*methods_type_annotations == NULL) {
*methods_type_annotations =
MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
}
(*methods_type_annotations)->at_put(index, method_type_annotations);
}
(*methods_type_annotations)->at_put(index, method_type_annotations);
}
if (_need_verify && length > 1) {
@ -2886,7 +2924,8 @@ void ClassFileParser::parse_classfile_attributes(ClassLoaderData* loader_data,
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
parse_annotations(runtime_visible_annotations,
parse_annotations(loader_data,
runtime_visible_annotations,
runtime_visible_annotations_length,
cp,
parsed_annotations,
@ -3309,8 +3348,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
bool has_final_method = false;
AccessFlags promoted_flags;
promoted_flags.set_flags(0);
// These need to be oop pointers because they are allocated lazily
// inside parse_methods inside a nested HandleMark
Array<AnnotationArray*>* methods_annotations = NULL;
Array<AnnotationArray*>* methods_parameter_annotations = NULL;
Array<AnnotationArray*>* methods_default_annotations = NULL;
@ -3405,18 +3443,21 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// Size of Java itable (in words)
itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);
// get the padding width from the option
// TODO: Ask VM about specific CPU we are running on
int pad_size = ContendedPaddingWidth;
// Field size and offset computation
int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
#ifndef PRODUCT
int orig_nonstatic_field_size = 0;
#endif
int static_field_size = 0;
int next_static_oop_offset;
int next_static_double_offset;
int next_static_word_offset;
int next_static_short_offset;
int next_static_byte_offset;
int next_static_type_offset;
int next_static_padded_offset;
int next_nonstatic_oop_offset;
int next_nonstatic_double_offset;
int next_nonstatic_word_offset;
@ -3426,11 +3467,36 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
int first_nonstatic_oop_offset;
int first_nonstatic_field_offset;
int next_nonstatic_field_offset;
int next_nonstatic_padded_offset;
// Count the contended fields by type.
int static_contended_count = 0;
int nonstatic_contended_count = 0;
FieldAllocationCount fac_contended;
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
if (fs.is_contended()) {
fac_contended.count[atype]++;
if (fs.access_flags().is_static()) {
static_contended_count++;
} else {
nonstatic_contended_count++;
}
}
}
int contended_count = static_contended_count + nonstatic_contended_count;
// Calculate the starting byte offsets
next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
// class is contended, pad before all the fields
if (parsed_annotations.is_contended()) {
next_static_oop_offset += pad_size;
}
next_static_double_offset = next_static_oop_offset +
(fac.count[STATIC_OOP] * heapOopSize);
((fac.count[STATIC_OOP] - fac_contended.count[STATIC_OOP]) * heapOopSize);
if ( fac.count[STATIC_DOUBLE] &&
(Universe::field_type_should_be_aligned(T_DOUBLE) ||
Universe::field_type_should_be_aligned(T_LONG)) ) {
@ -3438,25 +3504,29 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
next_static_word_offset = next_static_double_offset +
(fac.count[STATIC_DOUBLE] * BytesPerLong);
((fac.count[STATIC_DOUBLE] - fac_contended.count[STATIC_DOUBLE]) * BytesPerLong);
next_static_short_offset = next_static_word_offset +
(fac.count[STATIC_WORD] * BytesPerInt);
((fac.count[STATIC_WORD] - fac_contended.count[STATIC_WORD]) * BytesPerInt);
next_static_byte_offset = next_static_short_offset +
(fac.count[STATIC_SHORT] * BytesPerShort);
next_static_type_offset = align_size_up((next_static_byte_offset +
fac.count[STATIC_BYTE] ), wordSize );
static_field_size = (next_static_type_offset -
next_static_oop_offset) / wordSize;
((fac.count[STATIC_SHORT] - fac_contended.count[STATIC_SHORT]) * BytesPerShort);
next_static_padded_offset = next_static_byte_offset +
((fac.count[STATIC_BYTE] - fac_contended.count[STATIC_BYTE]) * 1);
first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;
// class is contended, pad before all the fields
if (parsed_annotations.is_contended()) {
first_nonstatic_field_offset += pad_size;
}
next_nonstatic_field_offset = first_nonstatic_field_offset;
unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE];
unsigned int nonstatic_word_count = fac.count[NONSTATIC_WORD];
unsigned int nonstatic_short_count = fac.count[NONSTATIC_SHORT];
unsigned int nonstatic_byte_count = fac.count[NONSTATIC_BYTE];
unsigned int nonstatic_oop_count = fac.count[NONSTATIC_OOP];
unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
unsigned int nonstatic_word_count = fac.count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
unsigned int nonstatic_short_count = fac.count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
unsigned int nonstatic_byte_count = fac.count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
unsigned int nonstatic_oop_count = fac.count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
bool super_has_nonstatic_fields =
(super_klass() != NULL && super_klass->has_nonstatic_fields());
@ -3529,12 +3599,12 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
if( allocation_style == 0 ) {
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
(nonstatic_oop_count * heapOopSize);
} else if( allocation_style == 1 ) {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
next_nonstatic_double_offset = next_nonstatic_field_offset;
} else if( allocation_style == 2 ) {
// Fields allocation: oops fields in super and sub classes are together.
@ -3613,27 +3683,33 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
(nonstatic_word_count * BytesPerInt);
next_nonstatic_byte_offset = next_nonstatic_short_offset +
(nonstatic_short_count * BytesPerShort);
next_nonstatic_padded_offset = next_nonstatic_byte_offset +
nonstatic_byte_count;
int notaligned_offset;
if( allocation_style == 0 ) {
notaligned_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
} else { // allocation_style == 1
next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
// let oops jump before padding with this allocation style
if( allocation_style == 1 ) {
next_nonstatic_oop_offset = next_nonstatic_padded_offset;
if( nonstatic_oop_count > 0 ) {
next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
}
notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}
next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
- first_nonstatic_field_offset)/heapOopSize);
// Iterate over fields again and compute correct offsets.
// The field allocation type was temporarily stored in the offset slot.
// oop fields are located before non-oop fields (static and non-static).
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
// contended fields are handled below
if (fs.is_contended()) continue;
int real_offset;
FieldAllocationType atype = (FieldAllocationType) fs.offset();
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
// pack the rest of the fields
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
@ -3722,13 +3798,225 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
fs.set_offset(real_offset);
}
// Handle the contended cases.
//
// Each contended field should not share a cache line with another contended field.
// In the absence of alignment information, we end up pessimistically separating
// the fields with full-width padding.
//
// Additionally, this must not break the alignment of the fields themselves, so each
// field's offset is rounded up to its required alignment.
if (contended_count > 0) {
// if there is at least one contended field, we need to have pre-padding for them
if (nonstatic_contended_count > 0) {
next_nonstatic_padded_offset += pad_size;
}
// collect all contended groups
BitMap bm(cp->size());
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
if (fs.is_contended()) {
bm.set_bit(fs.contended_group());
}
}
int current_group = -1;
while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
// skip non-contended fields and fields from different group
if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
// handle statics below
if (fs.access_flags().is_static()) continue;
int real_offset;
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
switch (atype) {
case NONSTATIC_BYTE:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += 1;
break;
case NONSTATIC_SHORT:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerShort;
break;
case NONSTATIC_WORD:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerInt;
break;
case NONSTATIC_DOUBLE:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerLong;
break;
case NONSTATIC_OOP:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += heapOopSize;
// Create new oop map
nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
nonstatic_oop_map_count += 1;
if( first_nonstatic_oop_offset == 0 ) { // Undefined
first_nonstatic_oop_offset = real_offset;
}
break;
default:
ShouldNotReachHere();
}
if (fs.contended_group() == 0) {
// The contended group defines an equivalence class over the fields:
// fields within the same contended group are not inter-padded.
// The only exception is the default group, which defines no such
// equivalence and therefore requires intra-padding.
next_nonstatic_padded_offset += pad_size;
}
fs.set_offset(real_offset);
} // for
// Start laying out the next group.
// Note that this will effectively pad the last group in the back;
// this is expected to alleviate memory contention effects for
// subclass fields and/or adjacent object.
// If this was the default group, the padding is already in place.
if (current_group != 0) {
next_nonstatic_padded_offset += pad_size;
}
}
// handle static fields
// if there is at least one contended field, we need to have pre-padding for them
if (static_contended_count > 0) {
next_static_padded_offset += pad_size;
}
current_group = -1;
while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;
// skip non-contended fields and fields from different group
if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
// non-statics already handled above
if (!fs.access_flags().is_static()) continue;
int real_offset;
FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
switch (atype) {
case STATIC_BYTE:
next_static_padded_offset = align_size_up(next_static_padded_offset, 1);
real_offset = next_static_padded_offset;
next_static_padded_offset += 1;
break;
case STATIC_SHORT:
next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerShort);
real_offset = next_static_padded_offset;
next_static_padded_offset += BytesPerShort;
break;
case STATIC_WORD:
next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerInt);
real_offset = next_static_padded_offset;
next_static_padded_offset += BytesPerInt;
break;
case STATIC_DOUBLE:
next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerLong);
real_offset = next_static_padded_offset;
next_static_padded_offset += BytesPerLong;
break;
case STATIC_OOP:
next_static_padded_offset = align_size_up(next_static_padded_offset, heapOopSize);
real_offset = next_static_padded_offset;
next_static_padded_offset += heapOopSize;
break;
default:
ShouldNotReachHere();
}
if (fs.contended_group() == 0) {
// The contended group defines an equivalence class over the fields:
// fields within the same contended group are not inter-padded.
// The only exception is the default group, which defines no such
// equivalence and therefore requires intra-padding.
next_static_padded_offset += pad_size;
}
fs.set_offset(real_offset);
} // for
// Start laying out the next group.
// Note that this will effectively pad the last group in the back;
// this is expected to alleviate memory contention effects for
// subclass fields and/or adjacent object.
// If this was the default group, the padding is already in place.
if (current_group != 0) {
next_static_padded_offset += pad_size;
}
}
} // handle contended
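
To make the padding rules described in the comments above concrete, here is a standalone sketch of the same strategy: non-contended fields are packed normally, the contended block gets a single pre-pad, fields of the same named group sit back-to-back with one trailing pad, and fields in the anonymous (default) group are each padded individually. The field names, sizes, and 128-byte pad width are invented for illustration and are not taken from the changeset.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Field {
  std::string name;
  int size;          // also used as the required alignment here
  bool contended;
  int group;         // 0 == anonymous (default) contended group
};

static int align_up(int offset, int alignment) {
  return (offset + alignment - 1) / alignment * alignment;
}

int main() {
  const int pad_size = 128;                     // stand-in for ContendedPaddingWidth
  std::vector<Field> fields = {
    {"plainInt", 4, false, 0},
    {"hotA",     8, true,  1},                  // named group 1
    {"hotB",     8, true,  1},                  // same group: not padded away from hotA
    {"lonely",   8, true,  0},                  // default group: padded individually
  };

  int offset = 16;                              // pretend object header size
  // 1. Pack the non-contended fields first.
  for (const Field& f : fields) {
    if (f.contended) continue;
    offset = align_up(offset, f.size);
    std::printf("@%4d %s\n", offset, f.name.c_str());
    offset += f.size;
  }
  // 2. One pre-pad before the whole contended block.
  offset += pad_size;
  // 3. Lay out the contended fields group by group.
  std::map<int, std::vector<const Field*> > groups;
  for (const Field& f : fields) {
    if (f.contended) groups[f.group].push_back(&f);
  }
  for (const auto& g : groups) {
    for (const Field* f : g.second) {
      offset = align_up(offset, f->size);
      std::printf("@%4d %s (group %d)\n", offset, f->name.c_str(), g.first);
      offset += f->size;
      if (g.first == 0) offset += pad_size;     // anonymous group: pad every field
    }
    if (g.first != 0) offset += pad_size;       // named group: pad the group in the back
  }
  std::printf("instance ends at %d\n", align_up(offset, 8));
  return 0;
}

The output lists simulated offsets in the same spirit as the PrintFieldLayout dump added later in this changeset.
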
// Size of instances
int instance_size;
int notaligned_offset = next_nonstatic_padded_offset;
// Entire class is contended, pad in the back.
// This helps to alleviate memory contention effects for subclass fields
// and/or adjacent objects.
if (parsed_annotations.is_contended()) {
notaligned_offset += pad_size;
next_static_padded_offset += pad_size;
}
int next_static_type_offset = align_size_up(next_static_padded_offset, wordSize);
int static_field_size = (next_static_type_offset -
InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
- first_nonstatic_field_offset)/heapOopSize);
next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value");
assert(instance_size == align_object_size(align_size_up(
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations.is_contended()) ? pad_size : 0)),
wordSize) / wordSize), "consistent layout helper value");
// Number of non-static oop map blocks allocated at end of klass.
const unsigned int total_oop_map_count =
@ -3912,7 +4200,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// check that if this class is an interface then it doesn't have static methods
if (this_klass->is_interface()) {
check_illegal_static_method(this_klass, CHECK_(nullHandle));
/* A method of an interface in a JAVA 8 classfile can be static */
if (_major_version < JAVA_8_VERSION) {
check_illegal_static_method(this_klass, CHECK_(nullHandle));
}
}
@ -4005,6 +4296,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
#endif
#ifndef PRODUCT
if (PrintFieldLayout) {
print_field_layout(name,
fields,
cp,
instance_size,
first_nonstatic_field_offset,
next_nonstatic_field_offset,
next_static_type_offset);
}
#endif
// preserve result across HandleMark
preserve_this_klass = this_klass();
}
@ -4017,6 +4320,38 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
return this_klass;
}
void ClassFileParser::print_field_layout(Symbol* name,
Array<u2>* fields,
constantPoolHandle cp,
int instance_size,
int instance_fields_start,
int instance_fields_end,
int static_fields_end) {
tty->print("%s: field layout\n", name->as_klass_external_name());
tty->print(" @%3d %s\n", instance_fields_start, "--- instance fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", instance_fields_end, "--- instance fields end ---");
tty->print(" @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
tty->print(" @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
tty->print(" @%3d \"%s\" %s\n",
fs.offset(),
fs.name()->as_klass_external_name(),
fs.signature()->as_klass_external_name());
}
}
tty->print(" @%3d %s\n", static_fields_end, "--- static fields end ---");
tty->print("\n");
}
unsigned int
ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
unsigned int nonstatic_oop_map_count,
@ -4466,6 +4801,7 @@ void ClassFileParser::verify_legal_method_modifiers(
const bool is_bridge = (flags & JVM_ACC_BRIDGE) != 0;
const bool is_strict = (flags & JVM_ACC_STRICT) != 0;
const bool is_synchronized = (flags & JVM_ACC_SYNCHRONIZED) != 0;
const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
const bool major_gte_15 = _major_version >= JAVA_1_5_VERSION;
const bool major_gte_8 = _major_version >= JAVA_8_VERSION;
const bool is_initializer = (name == vmSymbols::object_initializer_name());
@ -4473,11 +4809,33 @@ void ClassFileParser::verify_legal_method_modifiers(
bool is_illegal = false;
if (is_interface) {
if (!is_public || is_static || is_final || is_native ||
((is_synchronized || is_strict) && major_gte_15 &&
(!major_gte_8 || is_abstract)) ||
(!major_gte_8 && !is_abstract)) {
is_illegal = true;
if (major_gte_8) {
// Class file version is JAVA_8_VERSION or later. Methods of
// interfaces may set any of the flags except ACC_PROTECTED,
// ACC_FINAL, ACC_NATIVE, and ACC_SYNCHRONIZED; they must
// have exactly one of the ACC_PUBLIC or ACC_PRIVATE flags set.
if ((is_public == is_private) || /* Only one of private and public should be true - XNOR */
(is_native || is_protected || is_final || is_synchronized) ||
// If a specific method of a class or interface has its
// ACC_ABSTRACT flag set, it must not have any of its
// ACC_FINAL, ACC_NATIVE, ACC_PRIVATE, ACC_STATIC,
// ACC_STRICT, or ACC_SYNCHRONIZED flags set. No need to
// check for ACC_FINAL, ACC_NATIVE or ACC_SYNCHRONIZED as
// those flags are illegal irrespective of ACC_ABSTRACT being set or not.
(is_abstract && (is_private || is_static || is_strict))) {
is_illegal = true;
}
} else if (major_gte_15) {
// Class file version in the interval [JAVA_1_5_VERSION, JAVA_8_VERSION)
if (!is_public || is_static || is_final || is_synchronized ||
is_native || !is_abstract || is_strict) {
is_illegal = true;
}
} else {
// Class file version is pre-JAVA_1_5_VERSION
if (!is_public || is_static || is_final || is_native || !is_abstract) {
is_illegal = true;
}
}
} else { // not interface
if (is_initializer) {
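
For readers who want to experiment with the new checks, the sketch below restates only the JAVA_8_VERSION branch above as a small predicate over the standard access-flag bits and evaluates a few combinations; it is an illustration, not the parser's actual validation path.

#include <cstdio>

enum : unsigned {
  ACC_PUBLIC = 0x0001, ACC_PRIVATE = 0x0002, ACC_PROTECTED = 0x0004,
  ACC_STATIC = 0x0008, ACC_FINAL   = 0x0010, ACC_SYNCHRONIZED = 0x0020,
  ACC_NATIVE = 0x0100, ACC_ABSTRACT = 0x0400, ACC_STRICT = 0x0800
};

// True if 'flags' is a legal combination for an interface method in a
// version-52 (Java 8) class file, per the rules in the hunk above.
static bool legal_java8_interface_method(unsigned flags) {
  bool is_public  = (flags & ACC_PUBLIC)  != 0;
  bool is_private = (flags & ACC_PRIVATE) != 0;
  if (is_public == is_private) return false;       // exactly one of the two must be set
  if (flags & (ACC_PROTECTED | ACC_FINAL | ACC_NATIVE | ACC_SYNCHRONIZED)) return false;
  if ((flags & ACC_ABSTRACT) &&
      (flags & (ACC_PRIVATE | ACC_STATIC | ACC_STRICT))) return false;  // abstract restrictions
  return true;
}

int main() {
  std::printf("%d\n", legal_java8_interface_method(ACC_PUBLIC | ACC_ABSTRACT));              // 1
  std::printf("%d\n", legal_java8_interface_method(ACC_PUBLIC | ACC_STATIC));                // 1: static now allowed
  std::printf("%d\n", legal_java8_interface_method(ACC_PUBLIC | ACC_ABSTRACT | ACC_STATIC)); // 0
  return 0;
}
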

View File

@ -95,17 +95,20 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
_method_DontInline,
_method_LambdaForm_Compiled,
_method_LambdaForm_Hidden,
_sun_misc_Contended,
_annotation_LIMIT
};
const Location _location;
int _annotations_present;
u2 _contended_group;
AnnotationCollector(Location location)
: _location(location), _annotations_present(0)
{
assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
}
// If this annotation name has an ID, report it (or _none).
ID annotation_index(Symbol* name);
ID annotation_index(ClassLoaderData* loader_data, Symbol* name);
// Set the annotation name:
void set_annotation(ID id) {
assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
@ -114,6 +117,12 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
// Report if the annotation is present.
bool has_any_annotations() { return _annotations_present != 0; }
bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
void set_contended_group(u2 group) { _contended_group = group; }
u2 contended_group() { return _contended_group; }
void set_contended(bool contended) { set_annotation(_sun_misc_Contended); }
bool is_contended() { return has_annotation(_sun_misc_Contended); }
};
class FieldAnnotationCollector: public AnnotationCollector {
public:
@ -177,6 +186,14 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<AnnotationArray*>** fields_type_annotations,
u2* java_fields_count_ptr, TRAPS);
void print_field_layout(Symbol* name,
Array<u2>* fields,
constantPoolHandle cp,
int instance_size,
int instance_fields_start,
int instance_fields_end,
int static_fields_end);
// Method parsing
methodHandle parse_method(ClassLoaderData* loader_data,
constantPoolHandle cp,
@ -247,7 +264,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
int runtime_invisible_annotations_length, TRAPS);
int skip_annotation(u1* buffer, int limit, int index);
int skip_annotation_value(u1* buffer, int limit, int index);
void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
void parse_annotations(ClassLoaderData* loader_data,
u1* buffer, int limit, constantPoolHandle cp,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
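For readers unfamiliar with AnnotationCollector, the sketch below models the bit-mask bookkeeping it performs, including the new _sun_misc_Contended entry and the contended-group value added in this change. The ID list is a reduced, illustrative subset and the class is a stand-in, not the real parser type (which splits set_contended and set_contended_group).

#include <cassert>
#include <cstdio>

// Each recognized annotation gets a small ID; presence is recorded as one bit.
enum ID {
  _unknown = -1,
  _method_ForceInline,     // illustrative subset of the real ID list
  _method_DontInline,
  _sun_misc_Contended,
  _annotation_LIMIT
};

struct Collector {
  int _annotations_present;
  unsigned short _contended_group;

  Collector() : _annotations_present(0), _contended_group(0) {}

  void set_annotation(ID id) {
    assert(id >= 0 && id < _annotation_LIMIT);
    _annotations_present |= (1 << id);                     // one presence bit per ID
  }
  bool has_annotation(ID id) const { return (_annotations_present & (1 << id)) != 0; }

  void set_contended(unsigned short group) {               // simplified: one call records both
    set_annotation(_sun_misc_Contended);
    _contended_group = group;
  }
  bool is_contended() const { return has_annotation(_sun_misc_Contended); }
  unsigned short contended_group() const { return _contended_group; }
};

int main() {
  Collector c;
  c.set_contended(3);   // record a @sun.misc.Contended annotation with contention group 3 (illustrative)
  printf("contended=%d group=%d\n", (int)c.is_contended(), (int)c.contended_group());
  return 0;
}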

View File

@ -318,6 +318,7 @@ ClassLoaderData::~ClassLoaderData() {
}
Metaspace* ClassLoaderData::metaspace_non_null() {
assert(!DumpSharedSpaces, "wrong metaspace!");
// If the metaspace has not been allocated, create a new one. Might want
// to create smaller arena for Reflection class loaders also.
// The reason for the delayed allocation is because some class loaders are

View File

@ -1285,13 +1285,15 @@ static void merge_in_new_methods(InstanceKlass* klass,
enum { ANNOTATIONS, PARAMETERS, DEFAULTS, NUM_ARRAYS };
Array<AnnotationArray*>* original_annots[NUM_ARRAYS];
Array<AnnotationArray*>* original_annots[NUM_ARRAYS] = { NULL };
Array<Method*>* original_methods = klass->methods();
Annotations* annots = klass->annotations();
original_annots[ANNOTATIONS] = annots->methods_annotations();
original_annots[PARAMETERS] = annots->methods_parameter_annotations();
original_annots[DEFAULTS] = annots->methods_default_annotations();
if (annots != NULL) {
original_annots[ANNOTATIONS] = annots->methods_annotations();
original_annots[PARAMETERS] = annots->methods_parameter_annotations();
original_annots[DEFAULTS] = annots->methods_default_annotations();
}
Array<int>* original_ordering = klass->method_ordering();
Array<int>* merged_ordering = Universe::the_empty_int_array();
@ -1370,9 +1372,15 @@ static void merge_in_new_methods(InstanceKlass* klass,
// Replace klass methods with new merged lists
klass->set_methods(merged_methods);
annots->set_methods_annotations(merged_annots[ANNOTATIONS]);
annots->set_methods_parameter_annotations(merged_annots[PARAMETERS]);
annots->set_methods_default_annotations(merged_annots[DEFAULTS]);
if (annots != NULL) {
annots->set_methods_annotations(merged_annots[ANNOTATIONS]);
annots->set_methods_parameter_annotations(merged_annots[PARAMETERS]);
annots->set_methods_default_annotations(merged_annots[DEFAULTS]);
} else {
assert(merged_annots[ANNOTATIONS] == NULL, "Must be");
assert(merged_annots[PARAMETERS] == NULL, "Must be");
assert(merged_annots[DEFAULTS] == NULL, "Must be");
}
ClassLoaderData* cld = klass->class_loader_data();
MetadataFactory::free_array(cld, original_methods);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -687,19 +687,6 @@ void java_lang_Class::set_array_klass(oop java_class, Klass* klass) {
}
Method* java_lang_Class::resolved_constructor(oop java_class) {
Metadata* constructor = java_class->metadata_field(_resolved_constructor_offset);
assert(constructor == NULL || constructor->is_method(), "should be method");
return ((Method*)constructor);
}
void java_lang_Class::set_resolved_constructor(oop java_class, Method* constructor) {
assert(constructor->is_method(), "should be method");
java_class->metadata_field_put(_resolved_constructor_offset, constructor);
}
bool java_lang_Class::is_primitive(oop java_class) {
// should assert:
//assert(java_lang_Class::is_instance(java_class), "must be a Class object");
@ -924,7 +911,6 @@ jlong java_lang_Thread::stackSize(oop java_thread) {
// Write the thread status value to threadStatus field in java.lang.Thread java class.
void java_lang_Thread::set_thread_status(oop java_thread,
java_lang_Thread::ThreadStatus status) {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm");
// The threadStatus is only present starting in 1.5
if (_thread_status_offset > 0) {
java_thread->int_field_put(_thread_status_offset, status);
@ -1158,179 +1144,43 @@ void java_lang_Throwable::print(Handle throwable, outputStream* st) {
}
}
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Method* method, int bci) {
// Get strings and string lengths
InstanceKlass* klass = method->method_holder();
const char* klass_name = klass->external_name();
int buf_len = (int)strlen(klass_name);
char* source_file_name;
if (klass->source_file_name() == NULL) {
source_file_name = NULL;
} else {
source_file_name = klass->source_file_name()->as_C_string();
buf_len += (int)strlen(source_file_name);
}
char* method_name = method->name()->as_C_string();
buf_len += (int)strlen(method_name);
// After this many redefines, the stack trace is unreliable.
const int MAX_VERSION = USHRT_MAX;
// Allocate temporary buffer with extra space for formatting and line number
char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
// Helper backtrace functions to store bci|version together.
static inline int merge_bci_and_version(int bci, int version) {
// only store u2 for version, checking for overflow.
if (version > USHRT_MAX || version < 0) version = MAX_VERSION;
assert((jushort)bci == bci, "bci should be short");
return build_int_from_shorts(version, bci);
}
// Print stack trace line in buffer
sprintf(buf, "\tat %s.%s", klass_name, method_name);
static inline int bci_at(unsigned int merged) {
return extract_high_short_from_int(merged);
}
static inline int version_at(unsigned int merged) {
return extract_low_short_from_int(merged);
}
static inline bool version_matches(Method* method, int version) {
return (method->constants()->version() == version && version < MAX_VERSION);
}
static inline int get_line_number(Method* method, int bci) {
int line_number = 0;
if (method->is_native()) {
strcat(buf, "(Native Method)");
// Negative value different from -1 below, enabling Java code in
// class java.lang.StackTraceElement to distinguish "native" from
// "no LineNumberTable". JDK tests for -2.
line_number = -2;
} else {
int line_number = method->line_number_from_bci(bci);
if (source_file_name != NULL && (line_number != -1)) {
// Sourcename and linenumber
sprintf(buf + (int)strlen(buf), "(%s:%d)", source_file_name, line_number);
} else if (source_file_name != NULL) {
// Just sourcename
sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);
} else {
// Neither sourcename nor linenumber
sprintf(buf + (int)strlen(buf), "(Unknown Source)");
}
nmethod* nm = method->code();
if (WizardMode && nm != NULL) {
sprintf(buf + (int)strlen(buf), "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm);
// Returns -1 if no LineNumberTable, and otherwise actual line number
line_number = method->line_number_from_bci(bci);
if (line_number == -1 && ShowHiddenFrames) {
line_number = bci + 1000000;
}
}
return buf;
}
void java_lang_Throwable::print_stack_element(Handle stream, Method* method, int bci) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(method, bci);
print_to_stream(stream, buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, Method* method, int bci) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(method, bci);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_to_stream(Handle stream, const char* str) {
if (stream.is_null()) {
tty->print_cr("%s", str);
} else {
EXCEPTION_MARK;
JavaValue result(T_VOID);
Handle arg (THREAD, oopFactory::new_charArray(str, THREAD));
if (!HAS_PENDING_EXCEPTION) {
JavaCalls::call_virtual(&result,
stream,
KlassHandle(THREAD, stream->klass()),
vmSymbols::println_name(),
vmSymbols::char_array_void_signature(),
arg,
THREAD);
}
// Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
if (HAS_PENDING_EXCEPTION) CLEAR_PENDING_EXCEPTION;
}
}
const char* java_lang_Throwable::no_stack_trace_message() {
return "\t<<no stack trace available>>";
}
// Currently used only for exceptions occurring during startup
void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
Thread *THREAD = Thread::current();
Handle h_throwable(THREAD, throwable);
while (h_throwable.not_null()) {
objArrayHandle result (THREAD, objArrayOop(backtrace(h_throwable())));
if (result.is_null()) {
st->print_cr(no_stack_trace_message());
return;
}
while (result.not_null()) {
typeArrayHandle methods (THREAD,
typeArrayOop(result->obj_at(trace_methods_offset)));
typeArrayHandle bcis (THREAD,
typeArrayOop(result->obj_at(trace_bcis_offset)));
if (methods.is_null() || bcis.is_null()) {
st->print_cr(no_stack_trace_message());
return;
}
int length = methods()->length();
for (int index = 0; index < length; index++) {
Method* method = ((Method*)methods()->metadata_at(index));
if (method == NULL) goto handle_cause;
int bci = bcis->ushort_at(index);
print_stack_element(st, method, bci);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
handle_cause:
{
EXCEPTION_MARK;
JavaValue result(T_OBJECT);
JavaCalls::call_virtual(&result,
h_throwable,
KlassHandle(THREAD, h_throwable->klass()),
vmSymbols::getCause_name(),
vmSymbols::void_throwable_signature(),
THREAD);
// Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
h_throwable = Handle();
} else {
h_throwable = Handle(THREAD, (oop) result.get_jobject());
if (h_throwable.not_null()) {
st->print("Caused by: ");
print(h_throwable, st);
st->cr();
}
}
}
}
}
void java_lang_Throwable::print_stack_trace(oop throwable, oop print_stream) {
// Note: this is no longer used in Merlin, but we support it for compatibility.
Thread *thread = Thread::current();
Handle stream(thread, print_stream);
objArrayHandle result (thread, objArrayOop(backtrace(throwable)));
if (result.is_null()) {
print_to_stream(stream, no_stack_trace_message());
return;
}
while (result.not_null()) {
typeArrayHandle methods(thread,
typeArrayOop(result->obj_at(trace_methods_offset)));
typeArrayHandle bcis (thread,
typeArrayOop(result->obj_at(trace_bcis_offset)));
if (methods.is_null() || bcis.is_null()) {
print_to_stream(stream, no_stack_trace_message());
return;
}
int length = methods()->length();
for (int index = 0; index < length; index++) {
Method* method = ((Method*)methods()->metadata_at(index));
if (method == NULL) return;
int bci = bcis->ushort_at(index);
print_stack_element(stream, method, bci);
}
result = objArrayHandle(thread, objArrayOop(result->obj_at(trace_next_offset)));
}
return line_number;
}
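The helpers above pack a constant-pool version and a bci into one 32-bit backtrace slot. The sketch below reproduces that packing with plain shifts and masks in place of HotSpot's build_int_from_shorts/extract_*_short_from_int utilities; it illustrates the layout (version in the low 16 bits, bci in the high 16 bits) rather than being the VM code itself.

#include <cassert>
#include <climits>
#include <cstdio>

const int MAX_VERSION = USHRT_MAX;   // after this many redefines, the version is unreliable

int merge_bci_and_version(int bci, int version) {
  // only a u2 is stored for the version; clamp on overflow, as the real helper does
  if (version > USHRT_MAX || version < 0) version = MAX_VERSION;
  assert((bci & ~0xFFFF) == 0 && "bci should fit in 16 bits");
  return (bci << 16) | (version & 0xFFFF);   // bci in the high half, version in the low half
}

int bci_at(unsigned int merged)     { return (int)(merged >> 16); }
int version_at(unsigned int merged) { return (int)(merged & 0xFFFF); }

int main() {
  int merged = merge_bci_and_version(/*bci=*/42, /*version=*/7);
  printf("bci=%d version=%d\n", bci_at(merged), version_at(merged));  // bci=42 version=7
  return 0;
}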
// This class provides a simple wrapper over the internal structure of
@ -1350,13 +1200,30 @@ class BacktraceBuilder: public StackObj {
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
trace_next_offset = java_lang_Throwable::trace_next_offset,
trace_size = java_lang_Throwable::trace_size,
trace_chunk_size = java_lang_Throwable::trace_chunk_size
};
// get info out of chunks
static typeArrayOop get_methods(objArrayHandle chunk) {
typeArrayOop methods = typeArrayOop(chunk->obj_at(trace_methods_offset));
assert(methods != NULL, "method array should be initialized in backtrace");
return methods;
}
static typeArrayOop get_bcis(objArrayHandle chunk) {
typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
assert(bcis != NULL, "bci array should be initialized in backtrace");
return bcis;
}
static objArrayOop get_mirrors(objArrayHandle chunk) {
objArrayOop mirrors = objArrayOop(chunk->obj_at(trace_mirrors_offset));
assert(mirrors != NULL, "mirror array should be initialized in backtrace");
return mirrors;
}
// constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL) {
expand(CHECK);
@ -1364,6 +1231,19 @@ class BacktraceBuilder: public StackObj {
_index = 0;
}
BacktraceBuilder(objArrayHandle backtrace) {
_methods = get_methods(backtrace);
_bcis = get_bcis(backtrace);
_mirrors = get_mirrors(backtrace);
assert(_methods->length() == _bcis->length() &&
_methods->length() == _mirrors->length(),
"method and source information arrays should match");
// head is the preallocated backtrace
_backtrace = _head = backtrace();
_index = 0;
}
void expand(TRAPS) {
objArrayHandle old_head(THREAD, _head);
Pause_No_Safepoint_Verifier pnsv(&_nsv);
@ -1371,10 +1251,10 @@ class BacktraceBuilder: public StackObj {
objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
objArrayHandle new_head(THREAD, head);
typeArrayOop methods = oopFactory::new_metaDataArray(trace_chunk_size, CHECK);
typeArrayOop methods = oopFactory::new_shortArray(trace_chunk_size, CHECK);
typeArrayHandle new_methods(THREAD, methods);
typeArrayOop bcis = oopFactory::new_shortArray(trace_chunk_size, CHECK);
typeArrayOop bcis = oopFactory::new_intArray(trace_chunk_size, CHECK);
typeArrayHandle new_bcis(THREAD, bcis);
objArrayOop mirrors = oopFactory::new_objectArray(trace_chunk_size, CHECK);
@ -1389,7 +1269,7 @@ class BacktraceBuilder: public StackObj {
_head = new_head();
_methods = new_methods();
_bcis = new_bcis();
_bcis = new_bcis();
_mirrors = new_mirrors();
_index = 0;
}
@ -1403,7 +1283,6 @@ class BacktraceBuilder: public StackObj {
// shorts. The later line number lookup would just smear the -1
// to a 0 even if it could be recorded.
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");
if (_index >= trace_chunk_size) {
methodHandle mhandle(THREAD, method);
@ -1411,26 +1290,148 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}
_methods->metadata_at_put(_index, method);
_bcis->ushort_at_put(_index, bci);
// we need to save the mirrors in the backtrace to keep the methods from
// being unloaded if their class loader is unloaded while we still have
// this stack trace.
_methods->short_at_put(_index, method->method_idnum());
_bcis->int_at_put(_index, merge_bci_and_version(bci, method->constants()->version()));
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
assert(method->method_holder()->java_mirror() != NULL, "never push null for mirror");
_mirrors->obj_at_put(_index, method->method_holder()->java_mirror());
_index++;
}
Method* current_method() {
assert(_index >= 0 && _index < trace_chunk_size, "out of range");
return ((Method*)_methods->metadata_at(_index));
}
jushort current_bci() {
assert(_index >= 0 && _index < trace_chunk_size, "out of range");
return _bcis->ushort_at(_index);
}
};
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
int method_id, int version, int bci) {
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* klass_name = holder->external_name();
int buf_len = (int)strlen(klass_name);
// pushing to the stack trace added one.
Method* method = holder->method_with_idnum(method_id);
char* method_name = method->name()->as_C_string();
buf_len += (int)strlen(method_name);
char* source_file_name = NULL;
if (version_matches(method, version)) {
Symbol* source = holder->source_file_name();
if (source != NULL) {
source_file_name = source->as_C_string();
buf_len += (int)strlen(source_file_name);
}
}
// Allocate temporary buffer with extra space for formatting and line number
char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
// Print stack trace line in buffer
sprintf(buf, "\tat %s.%s", klass_name, method_name);
if (!version_matches(method, version)) {
strcat(buf, "(Redefined)");
} else {
int line_number = get_line_number(method, bci);
if (line_number == -2) {
strcat(buf, "(Native Method)");
} else {
if (source_file_name != NULL && (line_number != -1)) {
// Sourcename and linenumber
sprintf(buf + (int)strlen(buf), "(%s:%d)", source_file_name, line_number);
} else if (source_file_name != NULL) {
// Just sourcename
sprintf(buf + (int)strlen(buf), "(%s)", source_file_name);
} else {
// Neither sourcename nor linenumber
sprintf(buf + (int)strlen(buf), "(Unknown Source)");
}
nmethod* nm = method->code();
if (WizardMode && nm != NULL) {
sprintf(buf + (int)strlen(buf), "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm);
}
}
}
return buf;
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
int method_id, int version, int bci) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
Handle mirror = method->method_holder()->java_mirror();
int method_id = method->method_idnum();
int version = method->constants()->version();
print_stack_element(st, mirror, method_id, version, bci);
}
const char* java_lang_Throwable::no_stack_trace_message() {
return "\t<<no stack trace available>>";
}
// Currently used only for exceptions occurring during startup
void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
Thread *THREAD = Thread::current();
Handle h_throwable(THREAD, throwable);
while (h_throwable.not_null()) {
objArrayHandle result (THREAD, objArrayOop(backtrace(h_throwable())));
if (result.is_null()) {
st->print_cr(no_stack_trace_message());
return;
}
while (result.not_null()) {
// Get method id, bci, version and mirror from chunk
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
Handle mirror(THREAD, mirrors->obj_at(index));
// NULL mirror means end of stack trace
if (mirror.is_null()) goto handle_cause;
int method = methods->short_at(index);
int version = version_at(bcis->int_at(index));
int bci = bci_at(bcis->int_at(index));
print_stack_element(st, mirror, method, version, bci);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
handle_cause:
{
EXCEPTION_MARK;
JavaValue cause(T_OBJECT);
JavaCalls::call_virtual(&cause,
h_throwable,
KlassHandle(THREAD, h_throwable->klass()),
vmSymbols::getCause_name(),
vmSymbols::void_throwable_signature(),
THREAD);
// Ignore any exceptions. we are in the middle of exception handling. Same as classic VM.
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
h_throwable = Handle();
} else {
h_throwable = Handle(THREAD, (oop) cause.get_jobject());
if (h_throwable.not_null()) {
st->print("Caused by: ");
print(h_throwable, st);
st->cr();
}
}
}
}
}
void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS) {
if (!StackTraceInThrowable) return;
@ -1591,21 +1592,8 @@ void java_lang_Throwable::allocate_backtrace(Handle throwable, TRAPS) {
// No-op if stack trace is disabled
if (!StackTraceInThrowable) return;
objArrayOop h_oop = oopFactory::new_objectArray(trace_size, CHECK);
objArrayHandle backtrace (THREAD, h_oop);
typeArrayOop m_oop = oopFactory::new_metaDataArray(trace_chunk_size, CHECK);
typeArrayHandle methods (THREAD, m_oop);
typeArrayOop b = oopFactory::new_shortArray(trace_chunk_size, CHECK);
typeArrayHandle bcis(THREAD, b);
objArrayOop mirror_oop = oopFactory::new_objectArray(trace_chunk_size, CHECK);
objArrayHandle mirrors (THREAD, mirror_oop);
// backtrace has space for one chunk (next is NULL)
backtrace->obj_at_put(trace_methods_offset, methods());
backtrace->obj_at_put(trace_bcis_offset, bcis());
backtrace->obj_at_put(trace_mirrors_offset, mirrors());
set_backtrace(throwable(), backtrace());
BacktraceBuilder bt(CHECK); // creates a backtrace
set_backtrace(throwable(), bt.backtrace());
}
@ -1617,48 +1605,26 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
assert(throwable->is_a(SystemDictionary::Throwable_klass()), "sanity check");
objArrayOop backtrace = (objArrayOop)java_lang_Throwable::backtrace(throwable());
assert(backtrace != NULL, "backtrace not preallocated");
JavaThread* THREAD = JavaThread::current();
oop m = backtrace->obj_at(trace_methods_offset);
typeArrayOop methods = typeArrayOop(m);
assert(methods != NULL && methods->length() > 0, "method array not preallocated");
objArrayHandle backtrace (THREAD, (objArrayOop)java_lang_Throwable::backtrace(throwable()));
assert(backtrace.not_null(), "backtrace should have been preallocated");
oop b = backtrace->obj_at(trace_bcis_offset);
typeArrayOop bcis = typeArrayOop(b);
assert(bcis != NULL, "bci array not preallocated");
ResourceMark rm(THREAD);
vframeStream st(THREAD);
oop mr = backtrace->obj_at(trace_mirrors_offset);
objArrayOop mirrors = objArrayOop(mr);
assert(mirrors != NULL, "bci array not preallocated");
assert(methods->length() == bcis->length() &&
methods->length() == mirrors->length(),
"method and bci arrays should match");
JavaThread* thread = JavaThread::current();
ResourceMark rm(thread);
vframeStream st(thread);
BacktraceBuilder bt(backtrace);
// Unlike fill_in_stack_trace we do not skip fillInStackTrace or throwable init
// methods as preallocated errors aren't created by "java" code.
// fill in as much stack trace as possible
typeArrayOop methods = BacktraceBuilder::get_methods(backtrace);
int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
int chunk_count = 0;
for (;!st.at_end(); st.next()) {
// Add entry and smear the -1 bci to 0 since the array only holds
// unsigned shorts. The later line number lookup would just smear
// the -1 to a 0 even if it could be recorded.
int bci = st.bci();
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");
bcis->ushort_at_put(chunk_count, bci);
methods->metadata_at_put(chunk_count, st.method());
mirrors->obj_at_put(chunk_count,
st.method()->method_holder()->java_mirror());
bt.push(st.method(), st.bci(), CHECK);
chunk_count++;
// Bail-out for deep stacks
@ -1672,7 +1638,6 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
}
}
@ -1691,12 +1656,12 @@ int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) {
chunk = next;
}
assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check");
// Count element in remaining partial chunk
typeArrayOop methods = typeArrayOop(chunk->obj_at(trace_methods_offset));
typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
assert(methods != NULL && bcis != NULL, "sanity check");
for (int i = 0; i < methods->length(); i++) {
if (methods->metadata_at(i) == NULL) break;
// Count element in remaining partial chunk. NULL value for mirror
// marks the end of the stack trace elements that are saved.
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
assert(mirrors != NULL, "sanity check");
for (int i = 0; i < mirrors->length(); i++) {
if (mirrors->obj_at(i) == NULL) break;
depth++;
}
}
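To make the chunk layout used by get_stack_trace_depth() and get_stack_trace_element() concrete, here is a simplified standalone model of one backtrace chunk: parallel arrays in which the mirror slot keeps the declaring class alive and a NULL mirror marks the end of the recorded frames. The types are plain C++ stand-ins, not oops or handles, and the chunk size is arbitrary.

#include <cstddef>
#include <cstdio>

struct Chunk {
  enum { chunk_size = 32 };
  const void* mirrors[chunk_size];  // class mirror per frame (NULL == end of trace)
  short       methods[chunk_size];  // method_idnum per frame
  int         bcis[chunk_size];     // merged bci|version per frame
};

int count_frames(const Chunk& chunk) {
  int depth = 0;
  for (int i = 0; i < Chunk::chunk_size; i++) {
    if (chunk.mirrors[i] == NULL) break;   // same end-of-trace rule as get_stack_trace_depth()
    depth++;
  }
  return depth;
}

int main() {
  Chunk chunk = {};                    // zero-initialized: all mirrors NULL
  static int dummy_class;              // stand-in for a java mirror
  chunk.mirrors[0] = &dummy_class;
  chunk.methods[0] = 5;                // method_idnum within the holder class
  chunk.bcis[0]    = (42 << 16) | 7;   // bci=42, constant-pool version=7
  printf("frames=%d\n", count_frames(chunk));   // frames=1
  return 0;
}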
@ -1722,25 +1687,28 @@ oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Get method,bci from chunk
typeArrayOop methods = typeArrayOop(chunk->obj_at(trace_methods_offset));
typeArrayOop bcis = typeArrayOop(chunk->obj_at(trace_bcis_offset));
assert(methods != NULL && bcis != NULL, "sanity check");
methodHandle method(THREAD, ((Method*)methods->metadata_at(chunk_index)));
int bci = bcis->ushort_at(chunk_index);
// Get method id, bci, version and mirror from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = version_at(bcis->int_at(chunk_index));
int bci = bci_at(bcis->int_at(chunk_index));
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk can be partially full
if (method.is_null()) {
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
oop element = java_lang_StackTraceElement::create(method, bci, CHECK_0);
oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, CHECK_0);
return element;
}
oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
// SystemDictionary::stackTraceElement_klass() will be null for pre-1.4 JDKs
assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@ -1752,37 +1720,39 @@ oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
Handle element = ik->allocate_instance_handle(CHECK_0);
// Fill in class name
ResourceMark rm(THREAD);
const char* str = method->method_holder()->external_name();
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* str = holder->external_name();
oop classname = StringTable::intern((char*) str, CHECK_0);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
// Fill in method name
Method* method = holder->method_with_idnum(method_id);
oop methodname = StringTable::intern(method->name(), CHECK_0);
java_lang_StackTraceElement::set_methodName(element(), methodname);
// Fill in source file name
Symbol* source = method->method_holder()->source_file_name();
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
oop filename = StringTable::intern(source, CHECK_0);
java_lang_StackTraceElement::set_fileName(element(), filename);
// Fill in source line number
int line_number;
if (method->is_native()) {
// Negative value different from -1 below, enabling Java code in
// class java.lang.StackTraceElement to distinguish "native" from
// "no LineNumberTable".
line_number = -2;
} else {
// Returns -1 if no LineNumberTable, and otherwise actual line number
line_number = method->line_number_from_bci(bci);
if (line_number == -1 && ShowHiddenFrames) {
line_number = bci + 1000000;
}
}
java_lang_StackTraceElement::set_lineNumber(element(), line_number);
if (!version_matches(method, version)) {
// The method was redefined, accurate line number information isn't available
java_lang_StackTraceElement::set_fileName(element(), NULL);
java_lang_StackTraceElement::set_lineNumber(element(), -1);
} else {
// Fill in source file name and line number.
Symbol* source = holder->source_file_name();
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
oop filename = StringTable::intern(source, CHECK_0);
java_lang_StackTraceElement::set_fileName(element(), filename);
int line_number = get_line_number(method, bci);
java_lang_StackTraceElement::set_lineNumber(element(), line_number);
}
return element();
}
oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
int method_id = method->method_idnum();
return create(mirror, method_id, method->constants()->version(), bci, THREAD);
}
void java_lang_reflect_AccessibleObject::compute_offsets() {
Klass* k = SystemDictionary::reflect_AccessibleObject_klass();
@ -2949,7 +2919,6 @@ int java_lang_System::err_offset_in_bytes() {
int java_lang_Class::_klass_offset;
int java_lang_Class::_array_klass_offset;
int java_lang_Class::_resolved_constructor_offset;
int java_lang_Class::_oop_size_offset;
int java_lang_Class::_static_oop_field_count_offset;
GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
@ -3303,7 +3272,6 @@ void JavaClasses::check_offsets() {
// Fake fields
// CHECK_OFFSET("java/lang/Class", java_lang_Class, klass); // %%% this needs to be checked
// CHECK_OFFSET("java/lang/Class", java_lang_Class, array_klass); // %%% this needs to be checked
// CHECK_OFFSET("java/lang/Class", java_lang_Class, resolved_constructor); // %%% this needs to be checked
// java.lang.Throwable

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -206,7 +206,6 @@ class java_lang_String : AllStatic {
#define CLASS_INJECTED_FIELDS(macro) \
macro(java_lang_Class, klass, intptr_signature, false) \
macro(java_lang_Class, resolved_constructor, intptr_signature, false) \
macro(java_lang_Class, array_klass, intptr_signature, false) \
macro(java_lang_Class, oop_size, int_signature, false) \
macro(java_lang_Class, static_oop_field_count, int_signature, false)
@ -218,7 +217,6 @@ class java_lang_Class : AllStatic {
// The fake offsets are added by the class loader when java.lang.Class is loaded
static int _klass_offset;
static int _resolved_constructor_offset;
static int _array_klass_offset;
static int _oop_size_offset;
@ -254,15 +252,11 @@ class java_lang_Class : AllStatic {
static bool is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
static oop primitive_mirror(BasicType t);
// JVM_NewInstance support
static Method* resolved_constructor(oop java_class);
static void set_resolved_constructor(oop java_class, Method* constructor);
// JVM_NewArray support
static Klass* array_klass(oop java_class);
static void set_array_klass(oop java_class, Klass* klass);
// compiler support for class operations
static int klass_offset_in_bytes() { return _klass_offset; }
static int resolved_constructor_offset_in_bytes() { return _resolved_constructor_offset; }
static int array_klass_offset_in_bytes() { return _array_klass_offset; }
// Support for classRedefinedCount field
static int classRedefinedCount(oop the_class_mirror);
@ -469,8 +463,7 @@ class java_lang_Throwable: AllStatic {
static int static_unassigned_stacktrace_offset;
// Printing
static char* print_stack_element_to_buffer(Method* method, int bci);
static void print_to_stream(Handle stream, const char* str);
static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
@ -490,12 +483,9 @@ class java_lang_Throwable: AllStatic {
static oop message(oop throwable);
static oop message(Handle throwable);
static void set_message(oop throwable, oop value);
// Print stack trace stored in exception by call-back to Java
// Note: this is no longer used in Merlin, but we still support
// it for compatibility.
static void print_stack_trace(oop throwable, oop print_stream);
static void print_stack_element(Handle stream, Method* method, int bci);
static void print_stack_element(outputStream *st, Method* method, int bci);
static void print_stack_element(outputStream *st, Handle mirror, int method,
int version, int bci);
static void print_stack_element(outputStream *st, methodHandle method, int bci);
static void print_stack_usage(Handle stream);
// Allocate space for backtrace (created but stack trace not filled in)
@ -1263,7 +1253,8 @@ class java_lang_StackTraceElement: AllStatic {
static void set_lineNumber(oop element, int value);
// Create an instance of StackTraceElement
static oop create(methodHandle m, int bci, TRAPS);
static oop create(Handle mirror, int method, int version, int bci, TRAPS);
static oop create(methodHandle method, int bci, TRAPS);
// Debugging
friend class JavaClasses;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -142,7 +142,7 @@ PlaceholderEntry* PlaceholderTable::find_and_add(int index, unsigned int hash,
}
// placeholder used to track class loading internal states
// placeholder is used to track class loading internal states
// placeholder existence now for loading superclass/superinterface
// superthreadQ tracks class circularity, while loading superclass/superinterface
// loadInstanceThreadQ tracks load_instance_class calls
@ -153,15 +153,17 @@ PlaceholderEntry* PlaceholderTable::find_and_add(int index, unsigned int hash,
// All claimants remove SeenThread after completing action
// On removal: if definer and all queues empty, remove entry
// Note: you can be in both placeholders and systemDictionary
// see parse_stream for redefine classes
// Therefore - must always check SD first
// Ignores the case where entry is not found
void PlaceholderTable::find_and_remove(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data, Thread* thread) {
Symbol* name, ClassLoaderData* loader_data,
classloadAction action,
Thread* thread) {
assert_locked_or_safepoint(SystemDictionary_lock);
PlaceholderEntry *probe = get_entry(index, hash, name, loader_data);
if (probe != NULL) {
// No other threads using this entry
probe->remove_seen_thread(thread, action);
// If no other threads using this entry, and this thread is not using this entry for other states
if ((probe->superThreadQ() == NULL) && (probe->loadInstanceThreadQ() == NULL)
&& (probe->defineThreadQ() == NULL) && (probe->definer() == NULL)) {
remove_entry(index, hash, name, loader_data);
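The cleanup rule spelled out in the comments above — drop the calling thread from the queue for one classloadAction, then delete the entry only when every queue and the definer are empty — can be modelled in isolation as follows. Queue lengths and the action enum are simplified stand-ins for the real SeenThread lists, and no locking is shown.

#include <cstdio>

struct PlaceholderEntry {
  int super_q_len;            // LOAD_SUPER claimants
  int load_instance_q_len;    // LOAD_INSTANCE claimants
  int define_q_len;           // DEFINE_CLASS claimants
  bool has_definer;
};

enum Action { LOAD_SUPER, LOAD_INSTANCE, DEFINE_CLASS };

// Returns true if the entry should be removed from the table afterwards.
bool find_and_remove(PlaceholderEntry& e, Action action) {
  switch (action) {   // corresponds to remove_seen_thread(thread, action)
    case LOAD_SUPER:    if (e.super_q_len > 0)         e.super_q_len--;         break;
    case LOAD_INSTANCE: if (e.load_instance_q_len > 0) e.load_instance_q_len--; break;
    case DEFINE_CLASS:  if (e.define_q_len > 0)        e.define_q_len--;        break;
  }
  return e.super_q_len == 0 && e.load_instance_q_len == 0 &&
         e.define_q_len == 0 && !e.has_definer;
}

int main() {
  PlaceholderEntry e = {1, 1, 0, false};
  printf("%d\n", find_and_remove(e, LOAD_SUPER));      // 0: a LOAD_INSTANCE claimant remains
  printf("%d\n", find_and_remove(e, LOAD_INSTANCE));   // 1: entry can now be removed
  return 0;
}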

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,7 +82,7 @@ public:
};
// find_and_add returns probe pointer - old or new
// If no entry exists, add a placeholder entry and push SeenThread
// If no entry exists, add a placeholder entry and push SeenThread for classloadAction
// If entry exists, reuse entry and push SeenThread for classloadAction
PlaceholderEntry* find_and_add(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data,
@ -92,9 +92,11 @@ public:
void remove_entry(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data);
// Remove placeholder information
// find_and_remove first removes SeenThread for classloadAction
// If all queues are empty and definer is null, remove the PlaceholderEntry completely
void find_and_remove(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data, Thread* thread);
Symbol* name, ClassLoaderData* loader_data,
classloadAction action, Thread* thread);
// GC support.
void classes_do(KlassClosure* f);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -172,7 +172,7 @@ Klass* SystemDictionary::handle_resolution_exception(Symbol* class_name, Handle
assert(klass_h() == NULL, "Should not have result with exception pending");
Handle e(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
THROW_MSG_CAUSE_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string(), e);
THROW_MSG_CAUSE_NULL(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string(), e);
} else {
return NULL;
}
@ -181,9 +181,9 @@ Klass* SystemDictionary::handle_resolution_exception(Symbol* class_name, Handle
if (klass_h() == NULL) {
ResourceMark rm(THREAD);
if (throw_error) {
THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string());
THROW_MSG_NULL(vmSymbols::java_lang_NoClassDefFoundError(), class_name->as_C_string());
} else {
THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
THROW_MSG_NULL(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
}
}
return (Klass*)klass_h();
@ -343,29 +343,29 @@ Klass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
}
if (throw_circularity_error) {
ResourceMark rm(THREAD);
THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), child_name->as_C_string());
THROW_MSG_NULL(vmSymbols::java_lang_ClassCircularityError(), child_name->as_C_string());
}
// java.lang.Object should have been found above
assert(class_name != NULL, "null super class for resolving");
// Resolve the super class or interface, check results on return
Klass* superk = NULL;
superk = SystemDictionary::resolve_or_null(class_name,
Klass* superk = SystemDictionary::resolve_or_null(class_name,
class_loader,
protection_domain,
THREAD);
KlassHandle superk_h(THREAD, superk);
// Note: clean up of placeholders currently in callers of
// resolve_super_or_fail - either at update_dictionary time
// or on error
// Clean up of placeholders moved so that each classloadAction registrar self-cleans up
// It is no longer necessary to keep the placeholder table alive until update_dictionary
// or error. GC used to walk the placeholder table as strong roots.
// The instanceKlass is kept alive because the class loader is on the stack,
// which keeps the loader_data alive, as well as all instanceKlasses in
// the loader_data. parseClassFile adds the instanceKlass to loader_data.
{
MutexLocker mu(SystemDictionary_lock, THREAD);
PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data);
if (probe != NULL) {
probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
}
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, child_name, loader_data, PlaceholderTable::LOAD_SUPER, THREAD);
SystemDictionary_lock->notify_all();
}
if (HAS_PENDING_EXCEPTION || superk_h() == NULL) {
// can null superk
@ -430,8 +430,8 @@ void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
// We're using a No_Safepoint_Verifier to catch any place where we
// might potentially do a GC at all.
// SystemDictionary::do_unloading() asserts that classes are only
// unloaded at a safepoint.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
dictionary()->add_protection_domain(d_index, d_hash, klass, loader_data,
protection_domain, THREAD);
@ -486,7 +486,6 @@ void SystemDictionary::double_lock_wait(Handle lockObject, TRAPS) {
// super class loading here.
// This also is critical in cases where the original thread gets stalled
// even in non-circularity situations.
// Note: only one thread can define the class, but multiple can resolve
// Note: must call resolve_super_or_fail even if null super -
// to force placeholder entry creation for this class for circularity detection
// Caller must check for pending exception
@ -518,14 +517,6 @@ instanceKlassHandle SystemDictionary::handle_parallel_super_load(
protection_domain,
true,
CHECK_(nh));
// We don't redefine the class, so we just need to clean up if there
// was not an error (don't want to modify any system dictionary
// data structures).
{
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
// parallelCapable class loaders do NOT wait for parallel superclass loads to complete
// Serial class loaders and bootstrap classloader do wait for superclass loads
@ -595,6 +586,10 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// Do lookup to see if class already exist and the protection domain
// has the right access
// This call uses find which checks protection domain already matches
// All subsequent calls use find_class, and set has_loaded_class so that
// before we return a result we call out to Java to check for a valid protection
// domain, allowing us to return the Klass* and add it to the pd_set if it is valid
unsigned int d_hash = dictionary()->compute_hash(name, loader_data);
int d_index = dictionary()->hash_to_index(d_hash);
Klass* probe = dictionary()->find(d_index, d_hash, name, loader_data,
@ -652,7 +647,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
}
}
// If the class in is in the placeholder table, class loading is in progress
// If the class is in the placeholder table, class loading is in progress
if (super_load_in_progress && havesupername==true) {
k = SystemDictionary::handle_parallel_super_load(name, superclassname,
class_loader, protection_domain, lockObject, THREAD);
@ -664,7 +659,9 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
}
}
bool throw_circularity_error = false;
if (!class_has_been_loaded) {
bool load_instance_added = false;
// add placeholder entry to record loading instance class
// Five cases:
@ -690,7 +687,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// No performance benefit and no deadlock issues.
// case 5. parallelCapable user level classloaders - without objectLocker
// Allow parallel classloading of a class/classloader pair
bool throw_circularity_error = false;
{
MutexLocker mu(SystemDictionary_lock, THREAD);
if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
@ -726,12 +723,13 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
}
}
}
// All cases: add LOAD_INSTANCE
// All cases: add LOAD_INSTANCE holding SystemDictionary_lock
// case 3: UnsyncloadClass || case 5: parallelCapable: allow competing threads to try
// LOAD_INSTANCE in parallel
// add placeholder entry even if error - callers will remove on error
if (!throw_circularity_error && !class_has_been_loaded) {
PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, NULL, THREAD);
load_instance_added = true;
// For class loaders that do not acquire the classloader object lock,
// if they did not catch another thread holding LOAD_INSTANCE,
// need a check analogous to the acquire ObjectLocker/find_class
@ -740,19 +738,18 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// class loaders holding the ObjectLock shouldn't find the class here
Klass* check = find_class(d_index, d_hash, name, loader_data);
if (check != NULL) {
// Klass is already loaded, so just return it
// Klass is already loaded, so return it after checking/adding protection domain
k = instanceKlassHandle(THREAD, check);
class_has_been_loaded = true;
newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
}
}
// must throw error outside of owning lock
if (throw_circularity_error) {
assert(!HAS_PENDING_EXCEPTION && load_instance_added == false,"circularity error cleanup");
ResourceMark rm(THREAD);
THROW_MSG_0(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string());
THROW_MSG_NULL(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string());
}
if (!class_has_been_loaded) {
@ -782,20 +779,6 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
}
}
// clean up placeholder entries for success or error
// This cleans up LOAD_INSTANCE entries
// It also cleans up LOAD_SUPER entries on errors from
// calling load_instance_class
{
MutexLocker mu(SystemDictionary_lock, THREAD);
PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name, loader_data);
if (probe != NULL) {
probe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
}
// If everything was OK (no exceptions, no null return value), and
// class_loader is NOT the defining loader, do a little more bookkeeping.
if (!HAS_PENDING_EXCEPTION && !k.is_null() &&
@ -819,18 +802,22 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
}
}
}
if (HAS_PENDING_EXCEPTION || k.is_null()) {
// On error, clean up placeholders
{
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
return NULL;
}
} // load_instance_class loop
if (load_instance_added == true) {
// clean up placeholder entries for LOAD_INSTANCE success or error
// This brackets the SystemDictionary updates for both defining
// and initiating loaders
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
SystemDictionary_lock->notify_all();
}
}
if (HAS_PENDING_EXCEPTION || k.is_null()) {
return NULL;
}
#ifdef ASSERT
{
ClassLoaderData* loader_data = k->class_loader_data();
@ -850,8 +837,8 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// so we cannot allow GC to occur while we're holding this entry.
// We're using a No_Safepoint_Verifier to catch any place where we
// might potentially do a GC at all.
// SystemDictionary::do_unloading() asserts that classes are only
// unloaded at a safepoint.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
loader_data,
@ -898,8 +885,8 @@ Klass* SystemDictionary::find(Symbol* class_name,
// so we cannot allow GC to occur while we're holding this entry.
// We're using a No_Safepoint_Verifier to catch any place where we
// might potentially do a GC at all.
// SystemDictionary::do_unloading() asserts that classes are only
// unloaded at a safepoint.
// Dictionary::do_unloading() asserts that classes in SD are only
// unloaded at a safepoint. Anonymous classes are not in SD.
No_Safepoint_Verifier nosafepoint;
return dictionary()->find(d_index, d_hash, class_name, loader_data,
protection_domain, THREAD);
@ -965,10 +952,6 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
// throw potential ClassFormatErrors.
//
// Note: "name" is updated.
// Further note: a placeholder will be added for this class when
// super classes are loaded (resolve_super_or_fail). We expect this
// to be called for all classes but java.lang.Object; and we preload
// java.lang.Object through resolve_or_fail, not this path.
instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
loader_data,
@ -979,21 +962,6 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
true,
THREAD);
// We don't redefine the class, so we just need to clean up whether there
// was an error or not (don't want to modify any system dictionary
// data structures).
// Parsed name could be null if we threw an error before we got far
// enough along to parse it -- in that case, there is nothing to clean up.
if (parsed_name != NULL) {
unsigned int p_hash = placeholders()->compute_hash(parsed_name,
loader_data);
int p_index = placeholders()->hash_to_index(p_hash);
{
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, parsed_name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
}
if (host_klass.not_null() && k.not_null()) {
assert(EnableInvokeDynamic, "");
@ -1062,10 +1030,6 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
// throw potential ClassFormatErrors.
//
// Note: "name" is updated.
// Further note: a placeholder will be added for this class when
// super classes are loaded (resolve_super_or_fail). We expect this
// to be called for all classes but java.lang.Object; and we preload
// java.lang.Object through resolve_or_fail, not this path.
instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
loader_data,
@ -1114,25 +1078,7 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
}
}
// If parsing the class file or define_instance_class failed, we
// need to remove the placeholder added on our behalf. But we
// must make sure parsed_name is valid first (it won't be if we had
// a format error before the class was parsed far enough to
// find the name).
if (HAS_PENDING_EXCEPTION && parsed_name != NULL) {
unsigned int p_hash = placeholders()->compute_hash(parsed_name,
loader_data);
int p_index = placeholders()->hash_to_index(p_hash);
{
MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, parsed_name, loader_data, THREAD);
SystemDictionary_lock->notify_all();
}
return NULL;
}
// Make sure that we didn't leave a place holder in the
// SystemDictionary; this is only done on success
// Make sure we have an entry in the SystemDictionary on success
debug_only( {
if (!HAS_PENDING_EXCEPTION) {
assert(parsed_name != NULL, "parsed_name is still null?");
@ -1547,8 +1493,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* clas
// Other cases fall through, and may run into duplicate defines
// caught by finding an entry in the SystemDictionary
if ((UnsyncloadClass || is_parallelDefine(class_loader)) && (probe->instance_klass() != NULL)) {
probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD);
SystemDictionary_lock->notify_all();
#ifdef ASSERT
Klass* check = find_class(d_index, d_hash, name_h, loader_data);
@ -1578,8 +1523,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* clas
probe->set_instance_klass(k());
}
probe->set_definer(NULL);
probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD);
SystemDictionary_lock->notify_all();
}
}
@ -1736,6 +1680,8 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
}
return newsize;
}
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
@ -2105,9 +2051,7 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
// All loaded classes get a unique ID.
TRACE_INIT_ID(k);
// Check for a placeholder. If there, remove it and make a
// new system dictionary entry.
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, THREAD);
// Make a new system dictionary entry.
Klass* sd_check = find_class(d_index, d_hash, name, loader_data);
if (sd_check == NULL) {
dictionary()->add_klass(name, loader_data, k);
@ -2116,12 +2060,8 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
#ifdef ASSERT
sd_check = find_class(d_index, d_hash, name, loader_data);
assert (sd_check != NULL, "should have entry in system dictionary");
// Changed to allow PH to remain to complete class circularity checking
// while only one thread can define a class at one time, multiple
// classes can resolve the superclass for a class at one time,
// and the placeholder is used to track that
// Symbol* ph_check = find_placeholder(name, class_loader);
// assert (ph_check == NULL, "should not have a placeholder entry");
// Note: there may be a placeholder entry: for circularity testing
// or for parallel defines
#endif
SystemDictionary_lock->notify_all();
}
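The net effect of the refactoring in this file is that LOAD_INSTANCE placeholders are added and removed in one bracketing pattern around the load, with a single cleanup point for both success and failure. The sketch below shows only that locking shape, using std::mutex/std::condition_variable in place of SystemDictionary_lock and stubbing out the placeholder table and the loader; it is not the VM's code.

#include <mutex>
#include <condition_variable>

struct PlaceholderTable {
  void find_and_add(const char* /*name*/)    { /* push SeenThread for LOAD_INSTANCE */ }
  void find_and_remove(const char* /*name*/) { /* pop SeenThread; drop entry if unused */ }
};

static std::mutex              SystemDictionary_lock;
static std::condition_variable SystemDictionary_cv;
static PlaceholderTable        placeholders;

// Stub loader for the sketch: pretend loading always succeeds.
static bool load_class(const char* /*name*/) { return true; }

bool resolve_instance_class(const char* name) {
  {
    std::lock_guard<std::mutex> mu(SystemDictionary_lock);
    placeholders.find_and_add(name);        // load_instance_added = true
  }
  bool ok = load_class(name);               // the actual class loading work
  {
    std::lock_guard<std::mutex> mu(SystemDictionary_lock);
    placeholders.find_and_remove(name);     // single cleanup point, success or error
    SystemDictionary_cv.notify_all();       // wake threads waiting on this placeholder (none in this sketch)
  }
  return ok;
}

int main() { return resolve_instance_class("Foo") ? 0 : 1; }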

View File

@ -194,7 +194,10 @@
template(java_lang_VirtualMachineError, "java/lang/VirtualMachineError") \
template(java_lang_StackOverflowError, "java/lang/StackOverflowError") \
template(java_lang_StackTraceElement, "java/lang/StackTraceElement") \
\
/* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
template(sun_misc_Contended_signature, "Lsun/misc/Contended;") \
\
/* class symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@ -284,7 +287,7 @@
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
\
\
/* common method and field names */ \
template(object_initializer_name, "<init>") \
template(class_initializer_name, "<clinit>") \
@ -383,7 +386,6 @@
template(basicType_name, "basicType") \
template(append_name, "append") \
template(klass_name, "klass") \
template(resolved_constructor_name, "resolved_constructor") \
template(array_klass_name, "array_klass") \
template(oop_size_name, "oop_size") \
template(static_oop_field_count_name, "static_oop_field_count") \
@ -733,6 +735,11 @@
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \
\
do_class(sun_nio_cs_iso8859_1_Encoder, "sun/nio/cs/ISO_8859_1$Encoder") \
do_intrinsic(_encodeISOArray, sun_nio_cs_iso8859_1_Encoder, encodeISOArray_name, encodeISOArray_signature, F_S) \
do_name( encodeISOArray_name, "encodeISOArray") \
do_signature(encodeISOArray_signature, "([CI[BII)I") \
\
/* java/lang/ref/Reference */ \
do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
\

View File

@ -30,6 +30,7 @@
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
@ -39,6 +40,7 @@
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
@ -168,6 +170,8 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
return (nmethod*)cb;
}
static size_t maxCodeCacheUsed = 0;
CodeBlob* CodeCache::allocate(int size) {
// Do not seize the CodeCache lock here--if the caller has not
// already done so, we are going to lose bigtime, since the code
@ -192,6 +196,8 @@ CodeBlob* CodeCache::allocate(int size) {
(address)_heap->end() - (address)_heap->begin());
}
}
maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
(address)_heap->low_boundary()) - unallocated_capacity());
verify_if_often();
print_trace("allocation", cb, size);
return cb;
@ -928,7 +934,14 @@ void CodeCache::print_internals() {
FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}
#endif // !PRODUCT
void CodeCache::print() {
print_summary(tty);
#ifndef PRODUCT
if (!Verbose) return;
CodeBlob_sizes live;
CodeBlob_sizes dead;
@ -953,7 +966,7 @@ void CodeCache::print() {
}
if (Verbose) {
if (WizardMode) {
// print the oop_map usage
int code_size = 0;
int number_of_blobs = 0;
@ -977,20 +990,30 @@ void CodeCache::print() {
tty->print_cr(" map size = %d", map_size);
}
#endif // !PRODUCT
}
#endif // PRODUCT
void CodeCache::print_summary(outputStream* st, bool detailed) {
size_t total = (_heap->high_boundary() - _heap->low_boundary());
st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
"Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT
"Kb max_free_chunk=" SIZE_FORMAT "Kb",
total/K, (total - unallocated_capacity())/K,
maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K);
void CodeCache::print_bounds(outputStream* st) {
st->print_cr("Code Cache [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
_heap->low_boundary(),
_heap->high(),
_heap->high_boundary());
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb"
" largest_free_block=" SIZE_FORMAT,
nof_blobs(), nof_nmethods(), nof_adapters(),
unallocated_capacity()/K, largest_free_block());
if (detailed) {
st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
_heap->low_boundary(),
_heap->high(),
_heap->high_boundary());
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT,
nof_blobs(), nof_nmethods(), nof_adapters());
st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
"enabled" : Arguments::mode() == Arguments::_int ?
"disabled (interpreter mode)" :
"disabled (not enough contiguous free space left)");
}
}
void CodeCache::log_state(outputStream* st) {

View File

@ -145,11 +145,11 @@ class CodeCache : AllStatic {
static void prune_scavenge_root_nmethods();
// Printing/debugging
static void print() PRODUCT_RETURN; // prints summary
static void print(); // prints summary
static void print_internals();
static void verify(); // verifies the code cache
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
static void print_bounds(outputStream* st); // Prints a summary of the bounds of the code cache
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
static void log_state(outputStream* st);
// The full limits of the codeCache

View File

@ -50,6 +50,7 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
// Missing feature tests
virtual bool supports_native() { return true; }
virtual bool supports_osr () { return true; }
virtual bool can_compile_method(methodHandle method) { return true; }
#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
virtual bool is_c1 () { return false; }
virtual bool is_c2 () { return false; }

View File

@ -1218,7 +1218,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// lock, make sure that the compilation
// isn't prohibited in a straightforward way.
if (compiler(comp_level) == NULL || compilation_is_prohibited(method, osr_bci, comp_level)) {
if (compiler(comp_level) == NULL || !compiler(comp_level)->can_compile_method(method) || compilation_is_prohibited(method, osr_bci, comp_level)) {
return NULL;
}
@ -1714,6 +1714,20 @@ void CompileBroker::maybe_block() {
}
}
// wrapper for CodeCache::print_summary()
static void codecache_print(bool detailed)
{
ResourceMark rm;
stringStream s;
// Dump code cache into a buffer before locking the tty, to avoid lock conflicts.
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_summary(&s, detailed);
}
ttyLocker ttyl;
tty->print_cr(s.as_string());
}
// ------------------------------------------------------------------
// CompileBroker::invoke_compiler_on_method
//
@ -1841,6 +1855,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes());
}
if (PrintCodeCacheOnCompilation)
codecache_print(/* detailed= */ false);
// Disable compilation, if required.
switch (compilable) {
case ciEnv::MethodCompilable_never:
@ -1885,6 +1902,7 @@ void CompileBroker::handle_full_code_cache() {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods ) {
if (xtty != NULL) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.
@ -1898,9 +1916,9 @@ void CompileBroker::handle_full_code_cache() {
}
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
CodeCache::print_bounds(tty);
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
@ -1913,6 +1931,7 @@ void CompileBroker::handle_full_code_cache() {
AlwaysCompileLoopMethods = false;
}
}
codecache_print(/* detailed= */ true);
}
// ------------------------------------------------------------------

View File

@ -542,17 +542,17 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional,
st->print("Oop");
break;
case OopMapValue::value_value:
st->print("Value" );
st->print("Value");
break;
case OopMapValue::narrowoop_value:
tty->print("NarrowOop" );
st->print("NarrowOop");
break;
case OopMapValue::callee_saved_value:
st->print("Callers_" );
st->print("Callers_");
optional->print_on(st);
break;
case OopMapValue::derived_oop_value:
st->print("Derived_oop_" );
st->print("Derived_oop_");
optional->print_on(st);
break;
default:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -554,7 +554,7 @@ void CompactibleFreeListSpace::reportFreeListStatistics() const {
reportIndexedFreeListStatistics();
size_t total_size = totalSizeInIndexedFreeLists() +
_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
gclog_or_tty->print(" free=%ld frag=%1.4f\n", total_size, flsFrag());
gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3338,7 +3338,7 @@ bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
@ -9203,7 +9203,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
if (Verbose && PrintGCDetails) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}

View File

@ -131,17 +131,23 @@ void WorkerDataArray<T>::print(int level, const char* title) {
#ifndef PRODUCT
template <> const int WorkerDataArray<int>::_uninitialized = -1;
template <> const double WorkerDataArray<double>::_uninitialized = -1.0;
template <> const size_t WorkerDataArray<size_t>::_uninitialized = (size_t)-1;
template <class T>
void WorkerDataArray<T>::reset() {
for (uint i = 0; i < _length; i++) {
_data[i] = (T)-1;
_data[i] = (T)_uninitialized;
}
}
template <class T>
void WorkerDataArray<T>::verify() {
for (uint i = 0; i < _length; i++) {
assert(_data[i] >= (T)0, err_msg("Invalid data for worker %d", i));
assert(_data[i] != _uninitialized,
err_msg("Invalid data for worker " UINT32_FORMAT ", data: %lf, uninitialized: %lf",
i, (double)_data[i], (double)_uninitialized));
}
}
@ -201,20 +207,20 @@ void G1GCPhaseTimes::note_gc_end() {
_last_termination_attempts.verify();
_last_gc_worker_end_times_ms.verify();
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
_last_gc_worker_times_ms.set(i, worker_time);
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i);
_last_gc_worker_times_ms.set(i, worker_time);
double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
_last_satb_filtering_times_ms.get(i) +
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
_last_satb_filtering_times_ms.get(i) +
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
double worker_other_time = worker_time - worker_known_time;
_last_gc_worker_other_times_ms.set(i, worker_other_time);
}
double worker_other_time = worker_time - worker_known_time;
_last_gc_worker_other_times_ms.set(i, worker_other_time);
}
_last_gc_worker_times_ms.verify();
_last_gc_worker_other_times_ms.verify();

View File

@ -35,6 +35,8 @@ class WorkerDataArray : public CHeapObj<mtGC> {
const char* _print_format;
bool _print_sum;
NOT_PRODUCT(static const T _uninitialized;)
// We are caching the sum and average to only have to calculate them once.
// This is not done in an MT-safe way. It is intended to allow single
// threaded code to call sum() and average() multiple times in any order

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -287,24 +287,24 @@
"The number of times we'll force an overflow during " \
"concurrent marking") \
\
experimental(uintx, G1NewSizePercent, 20, \
experimental(uintx, G1NewSizePercent, 5, \
"Percentage (0-100) of the heap size to use as default " \
"minimum young gen size.") \
\
experimental(uintx, G1MaxNewSizePercent, 80, \
experimental(uintx, G1MaxNewSizePercent, 60, \
"Percentage (0-100) of the heap size to use as default " \
" maximum young gen size.") \
\
experimental(uintx, G1MixedGCLiveThresholdPercent, 90, \
experimental(uintx, G1MixedGCLiveThresholdPercent, 65, \
"Threshold for regions to be considered for inclusion in the " \
"collection set of mixed GCs. " \
"Regions with live bytes exceeding this will not be collected.") \
\
product(uintx, G1HeapWastePercent, 5, \
product(uintx, G1HeapWastePercent, 10, \
"Amount of space, expressed as a percentage of the heap size, " \
"that G1 is willing not to collect to avoid expensive GCs.") \
\
product(uintx, G1MixedGCCountTarget, 4, \
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
experimental(uintx, G1OldCSetRegionThresholdPercent, 10, \
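
These are only changed defaults; the previous ergonomics can still be requested explicitly at launch. A sketch of such a command line (MyApp is a placeholder; the experimental flags additionally require -XX:+UnlockExperimentalVMOptions):

    java -XX:+UseG1GC -XX:+UnlockExperimentalVMOptions \
         -XX:G1NewSizePercent=20 -XX:G1MaxNewSizePercent=80 \
         -XX:G1MixedGCLiveThresholdPercent=90 \
         -XX:G1HeapWastePercent=5 -XX:G1MixedGCCountTarget=4 MyApp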

View File

@ -878,12 +878,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
bool ParNewGeneration::_avoid_promotion_undo = false;
void ParNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
_tenuring_threshold =
age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
// A Generation that does parallel young-gen collection.
void ParNewGeneration::collect(bool full,
@ -1013,6 +1007,8 @@ void ParNewGeneration::collect(bool full,
size_policy->reset_gc_overhead_limit_count();
assert(to()->is_empty(), "to space should be empty now");
adjust_desired_tenuring_threshold();
} else {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@ -1035,7 +1031,6 @@ void ParNewGeneration::collect(bool full,
from()->set_concurrent_iteration_safe_limit(from()->top());
to()->set_concurrent_iteration_safe_limit(to()->top());
adjust_desired_tenuring_threshold();
if (ResizePLAB) {
plab_stats()->adjust_desired_plab_sz(n_workers);
}

View File

@ -347,10 +347,6 @@ class ParNewGeneration: public DefNewGeneration {
bool survivor_overflow() { return _survivor_overflow; }
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
// Adjust the tenuring threshold. See the implementation for
// the details of the policy.
virtual void adjust_desired_tenuring_threshold();
public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -529,7 +529,7 @@ bool PSScavenge::invoke_no_policy() {
if (PrintTenuringDistribution) {
gclog_or_tty->cr();
gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
size_policy->calculated_survivor_size_in_bytes(),
_tenuring_threshold, MaxTenuringThreshold);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
if (PrintTenuringDistribution) {
gclog_or_tty->cr();
gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
desired_survivor_size*oopSize, result, MaxTenuringThreshold);
}

View File

@ -550,6 +550,11 @@ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
return allocate(size, is_tlab);
}
void DefNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
_tenuring_threshold =
age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
void DefNewGeneration::collect(bool full,
bool clear_all_soft_refs,
@ -649,9 +654,7 @@ void DefNewGeneration::collect(bool full,
assert(to()->is_empty(), "to space should be empty now");
// Set the desired survivor size to half the real survivor space
_tenuring_threshold =
age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
adjust_desired_tenuring_threshold();
// A successful scavenge should restart the GC time limit count which is
// for full GC's.

View File

@ -124,7 +124,9 @@ protected:
_should_allocate_from_space = true;
}
protected:
// Tenuring
void adjust_desired_tenuring_threshold();
// Spaces
EdenSpace* _eden_space;
ContiguousSpace* _from_space;

View File

@ -66,7 +66,11 @@ class MetadataFactory : AllStatic {
if (data != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
int size = data->size();
loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
if (DumpSharedSpaces) {
loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
} else {
loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
}
}
}
@ -77,6 +81,7 @@ class MetadataFactory : AllStatic {
assert(loader_data != NULL, "shouldn't pass null");
int size = md->size();
// Call metadata's deallocate function which will call deallocate fields
assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
assert(!md->on_stack(), "can't deallocate things on stack");
md->deallocate_contents(loader_data);
loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -373,17 +373,44 @@ void VM_PopulateDumpSharedSpace::doit() {
md_top = wc.get_top();
// Print shared spaces all the time
const char* fmt = "%s space: " PTR_FORMAT " out of " PTR_FORMAT " words allocated at " PTR_FORMAT ".";
const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
Metaspace* ro_space = _loader_data->ro_metaspace();
Metaspace* rw_space = _loader_data->rw_metaspace();
tty->print_cr(fmt, "ro", ro_space->used_words(Metaspace::NonClassType),
ro_space->capacity_words(Metaspace::NonClassType),
ro_space->bottom());
tty->print_cr(fmt, "rw", rw_space->used_words(Metaspace::NonClassType),
rw_space->capacity_words(Metaspace::NonClassType),
rw_space->bottom());
tty->print_cr(fmt, "md", md_top - md_low, md_end-md_low, md_low);
tty->print_cr(fmt, "mc", mc_top - mc_low, mc_end-mc_low, mc_low);
const size_t BPW = BytesPerWord;
// Allocated size of each space (may not be all occupied)
const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
const size_t md_alloced = md_end-md_low;
const size_t mc_alloced = mc_end-mc_low;
const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
// Occupied size of each space.
const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
const size_t md_bytes = size_t(md_top - md_low);
const size_t mc_bytes = size_t(mc_top - mc_low);
// Percent of total size
const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes;
const double ro_t_perc = ro_bytes / double(total_bytes) * 100.0;
const double rw_t_perc = rw_bytes / double(total_bytes) * 100.0;
const double md_t_perc = md_bytes / double(total_bytes) * 100.0;
const double mc_t_perc = mc_bytes / double(total_bytes) * 100.0;
// Percent of fullness of each space
const double ro_u_perc = ro_bytes / double(ro_alloced) * 100.0;
const double rw_u_perc = rw_bytes / double(rw_alloced) * 100.0;
const double md_u_perc = md_bytes / double(md_alloced) * 100.0;
const double mc_u_perc = mc_bytes / double(mc_alloced) * 100.0;
const double total_u_perc = total_bytes / double(total_alloced) * 100.0;
tty->print_cr(fmt, "ro", ro_bytes, ro_t_perc, ro_alloced, ro_u_perc, ro_space->bottom());
tty->print_cr(fmt, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, rw_space->bottom());
tty->print_cr(fmt, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, md_low);
tty->print_cr(fmt, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, mc_low);
tty->print_cr("total : %9d [100.0%% of total] out of %9d bytes [%4.1f%% used]",
total_bytes, total_alloced, total_u_perc);
// Update the vtable pointers in all of the Klass objects in the
// heap. They should point to newly generated vtable.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -228,7 +228,7 @@ void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
if (size < alignment || size % alignment != 0) {
ResourceMark rm;
stringStream st;
st.print("Size of %s (%ld bytes) must be aligned to %ld bytes", name, size, alignment);
st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
char* error = st.as_string();
vm_exit_during_initialization(error);
}

View File

@ -122,7 +122,12 @@ class ExceptionTableElement VALUE_OBJ_CLASS_SPEC {
class MethodParametersElement VALUE_OBJ_CLASS_SPEC {
public:
u2 name_cp_index;
u4 flags;
// This has to happen; otherwise it will cause SIGBUS from a
// misaligned u4 on some architectures (i.e. SPARC)
// because MethodParametersElements are only aligned mod 2
// within the ConstMethod container
u2 flags_hi;
u2 flags_lo;
};
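
Because flags is now stored as two u2 halves, any reader of MethodParametersElement has to reassemble the 32-bit value itself. A minimal sketch of that reassembly, assuming flags_hi holds the upper 16 bits (the helper name is illustrative, not part of this change):

    #include <cstdint>

    // Sketch only: rebuild the original u4 flags value from the two
    // 16-bit halves that keep the struct 2-byte aligned.
    static uint32_t rebuild_flags(uint16_t flags_hi, uint16_t flags_lo) {
      return ((uint32_t)flags_hi << 16) | flags_lo;
    }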

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ ConstantPool::ConstantPool(Array<u1>* tags) {
set_pool_holder(NULL);
set_flags(0);
// only set to non-zero if constant pool is merged by RedefineClasses
set_orig_length(0);
set_version(0);
set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
// all fields are initialized; needed for GC
set_on_stack(false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,8 +103,8 @@ class ConstantPool : public Metadata {
union {
// set for CDS to restore resolved references
int _resolved_reference_length;
// only set to non-zero if constant pool is merged by RedefineClasses
int _orig_length;
// keeps version number for redefined classes (used in backtrace)
int _version;
} _saved;
Monitor* _lock;
@ -784,8 +784,11 @@ class ConstantPool : public Metadata {
static void copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS);
static void copy_entry_to(constantPoolHandle from_cp, int from_i, constantPoolHandle to_cp, int to_i, TRAPS);
int find_matching_entry(int pattern_i, constantPoolHandle search_cp, TRAPS);
int orig_length() const { return _saved._orig_length; }
void set_orig_length(int orig_length) { _saved._orig_length = orig_length; }
int version() const { return _saved._version; }
void set_version(int version) { _saved._version = version; }
void increment_and_save_version(int version) {
_saved._version = version >= 0 ? (version + 1) : version; // keep overflow
}
void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
int resolved_reference_length() const { return _saved._resolved_reference_length; }

View File

@ -43,14 +43,29 @@ class FieldInfo VALUE_OBJ_CLASS_SPEC {
public:
// fields
// Field info extracted from the class file and stored
// as an array of 7 shorts
// as an array of 6 shorts.
#define FIELDINFO_TAG_SIZE 2
#define FIELDINFO_TAG_BLANK 0
#define FIELDINFO_TAG_OFFSET 1
#define FIELDINFO_TAG_TYPE_PLAIN 2
#define FIELDINFO_TAG_TYPE_CONTENDED 3
#define FIELDINFO_TAG_MASK 3
// Packed field has the tag, and can be one of:
// hi bits <--------------------------- lo bits
// |---------high---------|---------low---------|
// ..........................................00 - blank
// [------------------offset----------------]01 - real field offset
// ......................[-------type-------]10 - plain field with type
// [--contention_group--][-------type-------]11 - contended field with type and contention group
enum FieldOffset {
access_flags_offset = 0,
name_index_offset = 1,
signature_index_offset = 2,
initval_index_offset = 3,
low_offset = 4,
high_offset = 5,
low_packed_offset = 4,
high_packed_offset = 5,
field_slots = 6
};
@ -76,17 +91,90 @@ class FieldInfo VALUE_OBJ_CLASS_SPEC {
void initialize(u2 access_flags,
u2 name_index,
u2 signature_index,
u2 initval_index,
u4 offset) {
u2 initval_index) {
_shorts[access_flags_offset] = access_flags;
_shorts[name_index_offset] = name_index;
_shorts[signature_index_offset] = signature_index;
_shorts[initval_index_offset] = initval_index;
set_offset(offset);
_shorts[low_packed_offset] = 0;
_shorts[high_packed_offset] = 0;
}
u2 access_flags() const { return _shorts[access_flags_offset]; }
u4 offset() const { return build_int_from_shorts(_shorts[low_offset], _shorts[high_offset]); }
u4 offset() const {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_OFFSET:
return build_int_from_shorts(_shorts[low_packed_offset], _shorts[high_packed_offset]) >> FIELDINFO_TAG_SIZE;
#ifndef PRODUCT
case FIELDINFO_TAG_TYPE_PLAIN:
ShouldNotReachHere2("Asking offset for the plain type field");
case FIELDINFO_TAG_TYPE_CONTENDED:
ShouldNotReachHere2("Asking offset for the contended type field");
case FIELDINFO_TAG_BLANK:
ShouldNotReachHere2("Asking offset for the blank field");
#endif
}
ShouldNotReachHere();
return 0;
}
bool is_contended() const {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_TYPE_PLAIN:
return false;
case FIELDINFO_TAG_TYPE_CONTENDED:
return true;
#ifndef PRODUCT
case FIELDINFO_TAG_OFFSET:
ShouldNotReachHere2("Asking contended flag for the field with offset");
case FIELDINFO_TAG_BLANK:
ShouldNotReachHere2("Asking contended flag for the blank field");
#endif
}
ShouldNotReachHere();
return false;
}
u2 contended_group() const {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_TYPE_PLAIN:
return 0;
case FIELDINFO_TAG_TYPE_CONTENDED:
return _shorts[high_packed_offset];
#ifndef PRODUCT
case FIELDINFO_TAG_OFFSET:
ShouldNotReachHere2("Asking the contended group for the field with offset");
case FIELDINFO_TAG_BLANK:
ShouldNotReachHere2("Asking the contended group for the blank field");
#endif
}
ShouldNotReachHere();
return 0;
}
u2 allocation_type() const {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_TYPE_PLAIN:
case FIELDINFO_TAG_TYPE_CONTENDED:
return (lo >> FIELDINFO_TAG_SIZE);
#ifndef PRODUCT
case FIELDINFO_TAG_OFFSET:
ShouldNotReachHere2("Asking the field type for field with offset");
case FIELDINFO_TAG_BLANK:
ShouldNotReachHere2("Asking the field type for the blank field");
#endif
}
ShouldNotReachHere();
return 0;
}
bool is_offset_set() const {
return (_shorts[low_packed_offset] & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET;
}
Symbol* name(constantPoolHandle cp) const {
int index = name_index();
@ -106,8 +194,46 @@ class FieldInfo VALUE_OBJ_CLASS_SPEC {
void set_access_flags(u2 val) { _shorts[access_flags_offset] = val; }
void set_offset(u4 val) {
_shorts[low_offset] = extract_low_short_from_int(val);
_shorts[high_offset] = extract_high_short_from_int(val);
val = val << FIELDINFO_TAG_SIZE; // make room for tag
_shorts[low_packed_offset] = extract_low_short_from_int(val) | FIELDINFO_TAG_OFFSET;
_shorts[high_packed_offset] = extract_high_short_from_int(val);
}
void set_allocation_type(int type) {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_BLANK:
_shorts[low_packed_offset] = ((type << FIELDINFO_TAG_SIZE)) & 0xFFFF;
_shorts[low_packed_offset] &= ~FIELDINFO_TAG_MASK;
_shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_PLAIN;
return;
#ifndef PRODUCT
case FIELDINFO_TAG_TYPE_PLAIN:
case FIELDINFO_TAG_TYPE_CONTENDED:
case FIELDINFO_TAG_OFFSET:
ShouldNotReachHere2("Setting the field type with overwriting");
#endif
}
ShouldNotReachHere();
}
void set_contended_group(u2 val) {
u2 lo = _shorts[low_packed_offset];
switch(lo & FIELDINFO_TAG_MASK) {
case FIELDINFO_TAG_TYPE_PLAIN:
_shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_CONTENDED;
_shorts[high_packed_offset] = val;
return;
#ifndef PRODUCT
case FIELDINFO_TAG_TYPE_CONTENDED:
ShouldNotReachHere2("Overwriting contended group");
case FIELDINFO_TAG_BLANK:
ShouldNotReachHere2("Setting contended group for the blank field");
case FIELDINFO_TAG_OFFSET:
ShouldNotReachHere2("Setting contended group for field with offset");
#endif
}
ShouldNotReachHere();
}
bool is_internal() const {

View File

@ -160,9 +160,26 @@ class FieldStreamBase : public StackObj {
return field()->offset();
}
int allocation_type() const {
return field()->allocation_type();
}
void set_offset(int offset) {
field()->set_offset(offset);
}
bool is_offset_set() const {
return field()->is_offset_set();
}
bool is_contended() const {
return field()->is_contended();
}
int contended_group() const {
return field()->contended_group();
}
};
// Iterate over only the internal fields

View File

@ -2890,11 +2890,7 @@ void InstanceKlass::oop_print_on(oop obj, outputStream* st) {
st->print(BULLET"fake entry for mirror: ");
mirrored_klass->print_value_on_maybe_null(st);
st->cr();
st->print(BULLET"fake entry resolved_constructor: ");
Method* ctor = java_lang_Class::resolved_constructor(obj);
ctor->print_value_on_maybe_null(st);
Klass* array_klass = java_lang_Class::array_klass(obj);
st->cr();
st->print(BULLET"fake entry for array: ");
array_klass->print_value_on_maybe_null(st);
st->cr();

View File

@ -225,12 +225,17 @@ class InstanceKlass: public Klass {
u2 _java_fields_count; // The number of declared Java fields
int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
// _is_marked_dependent can be set concurrently, thus cannot be part of the
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization
enum {
_misc_rewritten = 1 << 0, // methods rewritten.
_misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
_misc_should_verify_class = 1 << 2, // allow caching of preverification
_misc_is_anonymous = 1 << 3 // has embedded _inner_classes field
_misc_is_anonymous = 1 << 3, // has embedded _inner_classes field
_misc_is_contended = 1 << 4, // marked with contended annotation
_misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@ -253,10 +258,6 @@ class InstanceKlass: public Klass {
jint _cached_class_file_len; // JVMTI: length of above
JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map; // JVMTI: used during heap iteration
// true if class, superclass, or implemented interfaces have default methods
bool _has_default_methods;
volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change
// Method array.
Array<Method*>* _methods;
// Interface (Klass*s) this class declares locally to implement.
@ -280,6 +281,8 @@ class InstanceKlass: public Klass {
// ...
Array<u2>* _fields;
volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change
// Class states are defined as ClassState (see above).
// Place the _init_state here to utilize the unused 2-byte after
// _idnum_allocated_count.
@ -550,6 +553,17 @@ class InstanceKlass: public Klass {
return is_anonymous() ? java_mirror() : class_loader();
}
bool is_contended() const {
return (_misc_flags & _misc_is_contended) != 0;
}
void set_is_contended(bool value) {
if (value) {
_misc_flags |= _misc_is_contended;
} else {
_misc_flags &= ~_misc_is_contended;
}
}
// signers
objArrayOop signers() const { return _signers; }
void set_signers(objArrayOop s) { klass_oop_store((oop*)&_signers, s); }
@ -616,8 +630,16 @@ class InstanceKlass: public Klass {
return _jvmti_cached_class_field_map;
}
bool has_default_methods() const { return _has_default_methods; }
void set_has_default_methods(bool b) { _has_default_methods = b; }
bool has_default_methods() const {
return (_misc_flags & _misc_has_default_methods) != 0;
}
void set_has_default_methods(bool b) {
if (b) {
_misc_flags |= _misc_has_default_methods;
} else {
_misc_flags &= ~_misc_has_default_methods;
}
}
// for adding methods, ConstMethod::UNSET_IDNUM means no more ids available
inline u2 next_method_idnum();

View File

@ -516,6 +516,9 @@
develop(bool, SpecialArraysEquals, true, \
"special version of Arrays.equals(char[],char[])") \
\
product(bool, SpecialEncodeISOArray, true, \
"special version of ISO_8859_1$Encoder.encodeISOArray") \
\
develop(bool, BailoutToInterpreterForThrows, false, \
"Compiled methods which throws/catches exceptions will be " \
"deopt and intp.") \

View File

@ -165,13 +165,13 @@ uint ReturnNode::match_edge(uint idx) const {
#ifndef PRODUCT
void ReturnNode::dump_req() const {
void ReturnNode::dump_req(outputStream *st) const {
// Dump the required inputs, enclosed in '(' and ')'
uint i; // Exit value of loop
for( i=0; i<req(); i++ ) { // For all required inputs
if( i == TypeFunc::Parms ) tty->print("returns");
if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else tty->print("_ ");
for (i = 0; i < req(); i++) { // For all required inputs
if (i == TypeFunc::Parms) st->print("returns");
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else st->print("_ ");
}
}
#endif
@ -208,13 +208,13 @@ uint RethrowNode::match_edge(uint idx) const {
}
#ifndef PRODUCT
void RethrowNode::dump_req() const {
void RethrowNode::dump_req(outputStream *st) const {
// Dump the required inputs, enclosed in '(' and ')'
uint i; // Exit value of loop
for( i=0; i<req(); i++ ) { // For all required inputs
if( i == TypeFunc::Parms ) tty->print("exception");
if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else tty->print("_ ");
for (i = 0; i < req(); i++) { // For all required inputs
if (i == TypeFunc::Parms) st->print("exception");
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else st->print("_ ");
}
}
#endif
@ -330,7 +330,8 @@ static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, c
st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
return;
}
if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
if (regalloc->node_regs_max_index() > 0 &&
OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
char buf[50];
regalloc->dump_register(n,buf);
st->print(" %s%d]=%s",msg,i,buf);
@ -381,7 +382,7 @@ static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, c
//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
st->print(" #");
if( _method ) {
if (_method) {
_method->print_short_name(st);
st->print(" @ bci:%d ",_bci);
} else {
@ -393,21 +394,22 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
MachSafePointNode *mcall = n->as_MachSafePoint();
uint i;
// Print locals
for( i = 0; i < (uint)loc_size(); i++ )
format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
for (i = 0; i < (uint)loc_size(); i++)
format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
// Print stack
for (i = 0; i < (uint)stk_size(); i++) {
if ((uint)(_stkoff + i) >= mcall->len())
st->print(" oob ");
else
format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
}
for (i = 0; (int)i < nof_monitors(); i++) {
Node *box = mcall->monitor_box(this, i);
Node *obj = mcall->monitor_obj(this, i);
if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
if (regalloc->node_regs_max_index() > 0 &&
OptoReg::is_valid(regalloc->get_reg_first(box))) {
box = BoxLockNode::box_node(box);
format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
} else {
OptoReg::Name box_reg = BoxLockNode::reg(box);
st->print(" MON-BOX%d=%s+%d",
@ -420,7 +422,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
if (BoxLockNode::box_node(box)->is_eliminated())
obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
}
format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
}
for (i = 0; i < (uint)scobjs.length(); i++) {
@ -463,9 +465,9 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
st->print(" [");
cifield = iklass->nonstatic_field_at(0);
cifield->print_name_on(st);
format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
} else {
format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
}
for (uint j = 1; j < nf; j++) {
fld_node = mcall->in(first_ind+j);
@ -473,9 +475,9 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
st->print(", [");
cifield = iklass->nonstatic_field_at(j);
cifield->print_name_on(st);
format_helper( regalloc, st, fld_node, ":", j, &scobjs );
format_helper(regalloc, st, fld_node, ":", j, &scobjs);
} else {
format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
}
}
}
@ -483,7 +485,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
}
}
st->print_cr("");
if (caller() != NULL) caller()->format(regalloc, n, st);
if (caller() != NULL) caller()->format(regalloc, n, st);
}
@ -586,15 +588,15 @@ JVMState* JVMState::clone_deep(Compile* C) const {
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req() const {
void CallNode::dump_req(outputStream *st) const {
// Dump the required inputs, enclosed in '(' and ')'
uint i; // Exit value of loop
for( i=0; i<req(); i++ ) { // For all required inputs
if( i == TypeFunc::Parms ) tty->print("(");
if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else tty->print("_ ");
for (i = 0; i < req(); i++) { // For all required inputs
if (i == TypeFunc::Parms) st->print("(");
if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
else st->print("_ ");
}
tty->print(")");
st->print(")");
}
void CallNode::dump_spec(outputStream *st) const {

View File

@ -126,7 +126,7 @@ public:
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
virtual void dump_req() const;
virtual void dump_req(outputStream *st = tty) const;
#endif
};
@ -147,7 +147,7 @@ class RethrowNode : public Node {
virtual uint match_edge(uint idx) const;
virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
virtual void dump_req() const;
virtual void dump_req(outputStream *st = tty) const;
#endif
};
@ -579,7 +579,7 @@ public:
virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
virtual void dump_req() const;
virtual void dump_req(outputStream *st = tty) const;
virtual void dump_spec(outputStream *st) const;
#endif
};

View File

@ -127,6 +127,7 @@ macro(DivL)
macro(DivMod)
macro(DivModI)
macro(DivModL)
macro(EncodeISOArray)
macro(EncodeP)
macro(EncodePKlass)
macro(ExpD)

View File

@ -692,7 +692,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
if (PrintInlining) {
if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
@ -2049,7 +2049,7 @@ void Compile::Optimize() {
} // (End scope of igvn; run destructor if necessary for asserts.)
dump_inlining();
dump_inlining();
// A method with only infinite loops has no edges entering loops from root
{
NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
@ -3497,7 +3497,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
}
void Compile::dump_inlining() {
if (PrintInlining) {
if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {

View File

@ -553,7 +553,13 @@ void Parse::do_call() {
rtype = ctype;
}
} else {
assert(rtype == ctype, "mismatched return types"); // symbolic resolution enforces this
// Symbolic resolution enforces the types to be the same.
// NOTE: We must relax the assert for unloaded types because two
// different ciType instances of the same unloaded class type
// can appear to be "loaded" by different loaders (depending on
// the accessing class).
assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
}
// If the return type of the method is not loaded, assert that the

View File

@ -523,7 +523,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
case Op_AryEq:
case Op_StrComp:
case Op_StrEquals:
case Op_StrIndexOf: {
case Op_StrIndexOf:
case Op_EncodeISOArray: {
add_local_var(n, PointsToNode::ArgEscape);
delayed_worklist->push(n); // Process it later.
break;
@ -701,7 +702,8 @@ void ConnectionGraph::add_final_edges(Node *n) {
case Op_AryEq:
case Op_StrComp:
case Op_StrEquals:
case Op_StrIndexOf: {
case Op_StrIndexOf:
case Op_EncodeISOArray: {
// char[] arrays passed to string intrinsic do not escape but
// they are not scalar replaceable. Adjust escape state for them.
// Start from in(2) edge since in(1) is memory edge.
@ -2581,15 +2583,22 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
}
// Otherwise skip it (the call updated 'result' value).
} else if (result->Opcode() == Op_SCMemProj) {
assert(result->in(0)->is_LoadStore(), "sanity");
const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
Node* mem = result->in(0);
Node* adr = NULL;
if (mem->is_LoadStore()) {
adr = mem->in(MemNode::Address);
} else {
assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
adr = mem->in(3); // Memory edge corresponds to destination array
}
const Type *at = igvn->type(adr);
if (at != Type::TOP) {
assert (at->isa_ptr() != NULL, "pointer type required.");
int idx = C->get_alias_index(at->is_ptr());
assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");
break;
}
result = result->in(0)->in(MemNode::Memory);
result = mem->in(MemNode::Memory);
}
}
if (result->is_Phi()) {
@ -2927,6 +2936,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
if (m->is_MergeMem()) {
assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
}
} else if (use->Opcode() == Op_EncodeISOArray) {
if (use->in(MemNode::Memory) == n || use->in(3) == n) {
// EncodeISOArray overwrites destination array
memnode_worklist.append_if_missing(use);
}
} else {
uint op = use->Opcode();
if (!(op == Op_CmpP || op == Op_Conv2B ||
@ -2962,6 +2976,16 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
n = n->as_MemBar()->proj_out(TypeFunc::Memory);
if (n == NULL)
continue;
} else if (n->Opcode() == Op_EncodeISOArray) {
// get the memory projection
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if (use->Opcode() == Op_SCMemProj) {
n = use;
break;
}
}
assert(n->Opcode() == Op_SCMemProj, "memory projection required");
} else {
assert(n->is_Mem(), "memory node required.");
Node *addr = n->in(MemNode::Address);
@ -2999,7 +3023,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
Node *use = n->fast_out(i);
if (use->is_Phi() || use->is_ClearArray()) {
memnode_worklist.append_if_missing(use);
} else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
continue;
memnode_worklist.append_if_missing(use);
@ -3010,6 +3034,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
} else if (use->is_MergeMem()) {
assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
} else if (use->Opcode() == Op_EncodeISOArray) {
if (use->in(MemNode::Memory) == n || use->in(3) == n) {
// EncodeISOArray overwrites destination array
memnode_worklist.append_if_missing(use);
}
} else {
uint op = use->Opcode();
if (!(op == Op_StoreCM ||

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -547,7 +547,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
// max. 2 chars allowed
if (value >= -9 && value <= 99) {
sprintf(buffer, INT64_FORMAT, value);
sprintf(buffer, JLONG_FORMAT, value);
print_prop(short_name, buffer);
} else {
print_prop(short_name, "L");

View File

@ -175,6 +175,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
case Op_StrEquals:
case Op_StrIndexOf:
case Op_AryEq:
case Op_EncodeISOArray:
// Not a legit memory op for implicit null check regardless of
// embedded loads
continue;

Some files were not shown because too many files have changed in this diff.