Merge
This commit is contained in:
commit
d7dd9d1027
@ -829,7 +829,7 @@ var getJibProfilesDependencies = function (input, common) {
|
||||
jtreg: {
|
||||
server: "javare",
|
||||
revision: "4.2",
|
||||
build_number: "b11",
|
||||
build_number: "b12",
|
||||
checksum_file: "MD5_VALUES",
|
||||
file: "jtreg_bin-4.2.zip",
|
||||
environment_name: "JT_HOME",
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -68,6 +68,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
|
||||
$(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup \
|
||||
$(TOPDIR)/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption \
|
||||
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \
|
||||
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo \
|
||||
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \
|
||||
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/IsModifiableModule \
|
||||
$(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleReads \
|
||||
@ -101,6 +102,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorStackDepthInfoTest := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libIsModifiableModuleTest := -lc
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleReadsTest := -lc
|
||||
|
@ -17725,7 +17725,7 @@ instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "shl $dst, $src, $shift\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
@ -17744,7 +17744,7 @@ instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "shl $dst, $src, $shift\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
@ -17764,9 +17764,8 @@ instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshr $dst, $src, $shift\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) sh = 7;
|
||||
sh = -sh & 7;
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
%}
|
||||
@ -17779,9 +17778,8 @@ instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshr $dst, $src, $shift\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) sh = 7;
|
||||
sh = -sh & 7;
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
%}
|
||||
@ -17795,14 +17793,14 @@ instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushr $dst, $src, $shift\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($src$$reg));
|
||||
} else {
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg), -sh & 7);
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
}
|
||||
%}
|
||||
ins_pipe(vshift64_imm);
|
||||
@ -17814,14 +17812,14 @@ instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushr $dst, $src, $shift\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 8) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($src$$reg));
|
||||
} else {
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg), -sh & 7);
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
}
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
@ -17890,7 +17888,7 @@ instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "shl $dst, $src, $shift\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
@ -17909,7 +17907,7 @@ instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "shl $dst, $src, $shift\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
@ -17929,9 +17927,8 @@ instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshr $dst, $src, $shift\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) sh = 15;
|
||||
sh = -sh & 15;
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T4H,
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
%}
|
||||
@ -17944,9 +17941,8 @@ instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshr $dst, $src, $shift\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) sh = 15;
|
||||
sh = -sh & 15;
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T8H,
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
%}
|
||||
@ -17960,14 +17956,14 @@ instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushr $dst, $src, $shift\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($src$$reg));
|
||||
} else {
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T4H,
|
||||
as_FloatRegister($src$$reg), -sh & 15);
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
}
|
||||
%}
|
||||
ins_pipe(vshift64_imm);
|
||||
@ -17979,14 +17975,14 @@ instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushr $dst, $src, $shift\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
int sh = (int)$shift$$constant & 31;
|
||||
int sh = (int)$shift$$constant;
|
||||
if (sh >= 16) {
|
||||
__ eor(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($src$$reg));
|
||||
} else {
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T8H,
|
||||
as_FloatRegister($src$$reg), -sh & 15);
|
||||
as_FloatRegister($src$$reg), sh);
|
||||
}
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
@ -18054,7 +18050,7 @@ instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ shl(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift64_imm);
|
||||
%}
|
||||
@ -18067,7 +18063,7 @@ instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ shl(as_FloatRegister($dst$$reg), __ T4S,
|
||||
as_FloatRegister($src$$reg),
|
||||
(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
@ -18080,7 +18076,7 @@ instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift64_imm);
|
||||
%}
|
||||
@ -18093,7 +18089,7 @@ instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T4S,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
@ -18106,7 +18102,7 @@ instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift64_imm);
|
||||
%}
|
||||
@ -18119,7 +18115,7 @@ instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T4S,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 31);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
@ -18159,7 +18155,7 @@ instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ shl(as_FloatRegister($dst$$reg), __ T2D,
|
||||
as_FloatRegister($src$$reg),
|
||||
(int)$shift$$constant & 63);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
@ -18172,7 +18168,7 @@ instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ sshr(as_FloatRegister($dst$$reg), __ T2D,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 63);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
@ -18185,7 +18181,7 @@ instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_encode %{
|
||||
__ ushr(as_FloatRegister($dst$$reg), __ T2D,
|
||||
as_FloatRegister($src$$reg),
|
||||
-(int)$shift$$constant & 63);
|
||||
(int)$shift$$constant);
|
||||
%}
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
|
@ -518,6 +518,7 @@ class Assembler : public AbstractAssembler {
|
||||
XXMRGHW_OPCODE = (60u << OPCODE_SHIFT | 18u << 3),
|
||||
XXMRGLW_OPCODE = (60u << OPCODE_SHIFT | 50u << 3),
|
||||
XXSPLTW_OPCODE = (60u << OPCODE_SHIFT | 164u << 2),
|
||||
XXLOR_OPCODE = (60u << OPCODE_SHIFT | 146u << 3),
|
||||
XXLXOR_OPCODE = (60u << OPCODE_SHIFT | 154u << 3),
|
||||
XXLEQV_OPCODE = (60u << OPCODE_SHIFT | 186u << 3),
|
||||
|
||||
@ -2162,6 +2163,7 @@ class Assembler : public AbstractAssembler {
|
||||
inline void mtvsrd( VectorSRegister d, Register a);
|
||||
inline void mtvsrwz( VectorSRegister d, Register a);
|
||||
inline void xxspltw( VectorSRegister d, VectorSRegister b, int ui2);
|
||||
inline void xxlor( VectorSRegister d, VectorSRegister a, VectorSRegister b);
|
||||
inline void xxlxor( VectorSRegister d, VectorSRegister a, VectorSRegister b);
|
||||
inline void xxleqv( VectorSRegister d, VectorSRegister a, VectorSRegister b);
|
||||
|
||||
|
@ -766,6 +766,7 @@ inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { e
|
||||
inline void Assembler::mtvsrd( VectorSRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vsrt(d) | ra(a)); }
|
||||
inline void Assembler::mtvsrwz( VectorSRegister d, Register a) { emit_int32( MTVSRWZ_OPCODE | vsrt(d) | ra(a)); }
|
||||
inline void Assembler::xxspltw( VectorSRegister d, VectorSRegister b, int ui2) { emit_int32( XXSPLTW_OPCODE | vsrt(d) | vsrb(b) | xxsplt_uim(uimm(ui2,2))); }
|
||||
inline void Assembler::xxlor( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
|
||||
inline void Assembler::xxlxor( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLXOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
|
||||
inline void Assembler::xxleqv( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLEQV_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
|
||||
inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
|
||||
|
@ -1656,9 +1656,9 @@ const RegMask &MachLoadPollAddrLateNode::out_RegMask() const {
|
||||
|
||||
// =============================================================================
|
||||
|
||||
// Figure out which register class each belongs in: rc_int, rc_float or
|
||||
// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
|
||||
// rc_stack.
|
||||
enum RC { rc_bad, rc_int, rc_float, rc_stack };
|
||||
enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };
|
||||
|
||||
static enum RC rc_class(OptoReg::Name reg) {
|
||||
// Return the register class for the given register. The given register
|
||||
@ -1673,6 +1673,9 @@ static enum RC rc_class(OptoReg::Name reg) {
|
||||
// We have 64 floating-point register halves, starting at index 64.
|
||||
if (reg < 64+64) return rc_float;
|
||||
|
||||
// We have 64 vector-scalar registers, starting at index 128.
|
||||
if (reg < 64+64+64) return rc_vs;
|
||||
|
||||
// Between float regs & stack are the flags regs.
|
||||
assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
|
||||
|
||||
@ -1735,6 +1738,58 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
|
||||
if (src_lo == dst_lo && src_hi == dst_hi)
|
||||
return size; // Self copy, no move.
|
||||
|
||||
if (bottom_type()->isa_vect() != NULL && ideal_reg() == Op_VecX) {
|
||||
// Memory->Memory Spill.
|
||||
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
|
||||
int src_offset = ra_->reg2offset(src_lo);
|
||||
int dst_offset = ra_->reg2offset(dst_lo);
|
||||
if (cbuf) {
|
||||
MacroAssembler _masm(cbuf);
|
||||
__ ld(R0, src_offset, R1_SP);
|
||||
__ std(R0, dst_offset, R1_SP);
|
||||
__ ld(R0, src_offset+8, R1_SP);
|
||||
__ std(R0, dst_offset+8, R1_SP);
|
||||
}
|
||||
size += 16;
|
||||
}
|
||||
// VectorSRegister->Memory Spill.
|
||||
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
|
||||
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
|
||||
int dst_offset = ra_->reg2offset(dst_lo);
|
||||
if (cbuf) {
|
||||
MacroAssembler _masm(cbuf);
|
||||
__ addi(R0, R1_SP, dst_offset);
|
||||
__ stxvd2x(Rsrc, R0);
|
||||
}
|
||||
size += 8;
|
||||
}
|
||||
// Memory->VectorSRegister Spill.
|
||||
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
|
||||
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
|
||||
int src_offset = ra_->reg2offset(src_lo);
|
||||
if (cbuf) {
|
||||
MacroAssembler _masm(cbuf);
|
||||
__ addi(R0, R1_SP, src_offset);
|
||||
__ lxvd2x(Rdst, R0);
|
||||
}
|
||||
size += 8;
|
||||
}
|
||||
// VectorSRegister->VectorSRegister.
|
||||
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
|
||||
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
|
||||
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
|
||||
if (cbuf) {
|
||||
MacroAssembler _masm(cbuf);
|
||||
__ xxlor(Rdst, Rsrc, Rsrc);
|
||||
}
|
||||
size += 4;
|
||||
}
|
||||
else {
|
||||
ShouldNotReachHere(); // No VSR spill.
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
// --------------------------------------
|
||||
// Memory->Memory Spill. Use R0 to hold the value.
|
||||
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
|
||||
@ -3524,7 +3579,7 @@ encode %{
|
||||
assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
|
||||
%}
|
||||
|
||||
enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc) %{
|
||||
enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc, iRegLdst tmp) %{
|
||||
// Create new nodes.
|
||||
|
||||
// Make an operand with the bit pattern to load as float.
|
||||
@ -3533,8 +3588,8 @@ encode %{
|
||||
|
||||
loadConLReplicatedNodesTuple loadConLNodes =
|
||||
loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
|
||||
OptoReg::Name(R20_H_num), OptoReg::Name(R20_num),
|
||||
OptoReg::Name(VSR11_num), OptoReg::Name(VSR10_num));
|
||||
ra_->get_reg_second(n_tmp), ra_->get_reg_first(n_tmp),
|
||||
ra_->get_reg_second(this), ra_->get_reg_first(this));
|
||||
|
||||
// Push new nodes.
|
||||
if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
|
||||
@ -14013,12 +14068,13 @@ instruct repl4F_reg_Ex(vecX dst, regF src) %{
|
||||
%}
|
||||
%}
|
||||
|
||||
instruct repl4F_immF_Ex(vecX dst, immF src) %{
|
||||
instruct repl4F_immF_Ex(vecX dst, immF src, iRegLdst tmp) %{
|
||||
match(Set dst (ReplicateF src));
|
||||
predicate(n->as_Vector()->length() == 4);
|
||||
effect(TEMP tmp);
|
||||
ins_cost(10 * DEFAULT_COST);
|
||||
|
||||
postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase) );
|
||||
postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase, tmp) );
|
||||
%}
|
||||
|
||||
instruct repl4F_immF0(vecX dst, immF_0 zero) %{
|
||||
|
@ -109,8 +109,7 @@ void VM_Version::initialize() {
|
||||
|
||||
if (PowerArchitecturePPC64 >= 8) {
|
||||
if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
|
||||
// TODO: Switch on when it works stable. Currently, MachSpillCopyNode::implementation code is missing.
|
||||
//FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
|
||||
FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
|
||||
}
|
||||
} else {
|
||||
if (SuperwordUseVSX) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -991,8 +991,8 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
int offset = -1;
|
||||
|
||||
switch (c->type()) {
|
||||
case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
|
||||
case T_INT:
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS: {
|
||||
LIR_Opr tmp = FrameMap::O7_opr;
|
||||
int value = c->as_jint_bits();
|
||||
@ -1202,6 +1202,7 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
|
||||
__ stw(tmp, to.base(), to.disp());
|
||||
break;
|
||||
}
|
||||
case T_ADDRESS:
|
||||
case T_OBJECT: {
|
||||
Register tmp = O7;
|
||||
Address from = frame_map()->address_for_slot(src->single_stack_ix());
|
||||
@ -1355,7 +1356,6 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
|
||||
bool wide, bool unaligned) {
|
||||
@ -2265,10 +2265,10 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
op->obj()->as_register() == O0 &&
|
||||
op->klass()->as_register() == G5, "must be");
|
||||
if (op->init_check()) {
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ ldub(op->klass()->as_register(),
|
||||
in_bytes(InstanceKlass::init_state_offset()),
|
||||
op->tmp1()->as_register());
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
|
||||
__ delayed()->nop();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,32 +32,32 @@
|
||||
// (see c1_globals.hpp)
|
||||
|
||||
#ifndef TIERED
|
||||
define_pd_global(bool, BackgroundCompilation, true );
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
define_pd_global(bool, InlineIntrinsics, true );
|
||||
define_pd_global(bool, PreferInterpreterNativeStubs, false);
|
||||
define_pd_global(bool, ProfileTraps, false);
|
||||
define_pd_global(bool, UseOnStackReplacement, true );
|
||||
define_pd_global(bool, TieredCompilation, false);
|
||||
define_pd_global(intx, CompileThreshold, 1000 ); // Design center runs on 1.3.1
|
||||
define_pd_global(bool, BackgroundCompilation, true );
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
define_pd_global(bool, InlineIntrinsics, true );
|
||||
define_pd_global(bool, PreferInterpreterNativeStubs, false);
|
||||
define_pd_global(bool, ProfileTraps, false);
|
||||
define_pd_global(bool, UseOnStackReplacement, true );
|
||||
define_pd_global(bool, TieredCompilation, false);
|
||||
define_pd_global(intx, CompileThreshold, 1000 ); // Design center runs on 1.3.1
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 1400 );
|
||||
define_pd_global(bool, UseTLAB, true );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, FreqInlineSize, 325 );
|
||||
define_pd_global(bool, ResizeTLAB, true );
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, MetaspaceSize, 12*M );
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 16*K );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, OnStackReplacePercentage, 1400 );
|
||||
define_pd_global(bool, UseTLAB, true );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, FreqInlineSize, 325 );
|
||||
define_pd_global(bool, ResizeTLAB, true );
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, MetaspaceSize, 12*M );
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 16*K );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
|
||||
#endif // !TIERED
|
||||
|
||||
define_pd_global(bool, UseTypeProfile, false);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -71,12 +71,12 @@ define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
// sequence of instructions to load a 64 bit pointer.
|
||||
//
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -107,8 +107,8 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
|
||||
|
||||
#ifdef ASSERT
|
||||
// read the value once
|
||||
intptr_t data = method_holder->data();
|
||||
address destination = jump->jump_destination();
|
||||
volatile intptr_t data = method_holder->data();
|
||||
volatile address destination = jump->jump_destination();
|
||||
assert(data == 0 || data == (intptr_t)callee(),
|
||||
"a) MT-unsafe modification of inline cache");
|
||||
assert(destination == (address)-1 || destination == entry,
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -837,6 +837,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
case BarrierSet::G1SATBCTLogging:
|
||||
// With G1, don't generate the call if we statically know that the target in uninitialized
|
||||
if (!dest_uninitialized) {
|
||||
Register tmp = O5;
|
||||
assert_different_registers(addr, count, tmp);
|
||||
Label filtered;
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
|
||||
} else {
|
||||
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
|
||||
"Assumption");
|
||||
__ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
|
||||
}
|
||||
// Is marking active?
|
||||
__ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
|
||||
|
||||
__ save_frame(0);
|
||||
// Save the necessary global regs... will be used after.
|
||||
if (addr->is_global()) {
|
||||
@ -856,6 +870,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ mov(L1, count);
|
||||
}
|
||||
__ restore();
|
||||
|
||||
__ bind(filtered);
|
||||
DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
|
||||
}
|
||||
break;
|
||||
case BarrierSet::CardTableForRS:
|
||||
|
@ -1510,11 +1510,11 @@ void Assembler::call(Address adr) {
|
||||
}
|
||||
|
||||
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
|
||||
assert(entry != NULL, "call most probably wrong");
|
||||
InstructionMark im(this);
|
||||
emit_int8((unsigned char)0xE8);
|
||||
intptr_t disp = entry - (pc() + sizeof(int32_t));
|
||||
assert(is_simm32(disp), "must be 32bit offset (call2)");
|
||||
// Entry is NULL in case of a scratch emit.
|
||||
assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
|
||||
// Technically, should use call32_operand, but this format is
|
||||
// implied by the fact that we're emitting a call instruction.
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1543,10 +1543,10 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
|
||||
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
if (op->init_check()) {
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ cmpb(Address(op->klass()->as_register(),
|
||||
InstanceKlass::init_state_offset()),
|
||||
InstanceKlass::fully_initialized);
|
||||
add_debug_info_for_null_check_here(op->stub()->info());
|
||||
__ jcc(Assembler::notEqual, *op->stub()->entry());
|
||||
}
|
||||
__ allocate_object(op->obj()->as_register(),
|
||||
@ -2580,7 +2580,9 @@ void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right,
|
||||
move_regs(lreg, rax);
|
||||
|
||||
int idivl_offset = __ corrected_idivl(rreg);
|
||||
add_debug_info_for_div0(idivl_offset, info);
|
||||
if (ImplicitDiv0Checks) {
|
||||
add_debug_info_for_div0(idivl_offset, info);
|
||||
}
|
||||
if (code == lir_irem) {
|
||||
move_regs(rdx, dreg); // result is in rdx
|
||||
} else {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -572,6 +572,8 @@ void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
|
||||
if (!ImplicitDiv0Checks) {
|
||||
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
|
||||
__ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
|
||||
// Idiv/irem cannot trap (passing info would generate an assertion).
|
||||
info = NULL;
|
||||
}
|
||||
LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
|
||||
if (x->op() == Bytecodes::_irem) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -45,15 +45,15 @@ define_pd_global(intx, CompileThreshold, 1500 );
|
||||
define_pd_global(intx, OnStackReplacePercentage, 933 );
|
||||
define_pd_global(intx, FreqInlineSize, 325 );
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, MetaspaceSize, 12*M );
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -48,26 +48,26 @@ define_pd_global(intx, FreqInlineSize, 325);
|
||||
define_pd_global(intx, MinJumpTableSize, 10);
|
||||
define_pd_global(intx, LoopPercentProfileLimit, 30);
|
||||
#ifdef AMD64
|
||||
define_pd_global(intx, INTPRESSURE, 13);
|
||||
define_pd_global(intx, FLOATPRESSURE, 14);
|
||||
define_pd_global(intx, InteriorEntryAlignment, 16);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
define_pd_global(intx, INTPRESSURE, 13);
|
||||
define_pd_global(intx, FLOATPRESSURE, 14);
|
||||
define_pd_global(intx, InteriorEntryAlignment, 16);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
|
||||
#else
|
||||
define_pd_global(intx, INTPRESSURE, 6);
|
||||
define_pd_global(intx, FLOATPRESSURE, 6);
|
||||
define_pd_global(intx, InteriorEntryAlignment, 4);
|
||||
define_pd_global(intx, INTPRESSURE, 6);
|
||||
define_pd_global(intx, FLOATPRESSURE, 6);
|
||||
define_pd_global(intx, InteriorEntryAlignment, 4);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
|
||||
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
|
||||
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
|
||||
@ -84,10 +84,10 @@ define_pd_global(bool, OptoRegScheduling, true);
|
||||
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -160,8 +160,8 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
|
||||
|
||||
#ifdef ASSERT
|
||||
// read the value once
|
||||
intptr_t data = method_holder->data();
|
||||
address destination = jump->jump_destination();
|
||||
volatile intptr_t data = method_holder->data();
|
||||
volatile address destination = jump->jump_destination();
|
||||
assert(data == 0 || data == (intptr_t)callee(),
|
||||
"a) MT-unsafe modification of inline cache");
|
||||
assert(destination == (address)-1 || destination == entry,
|
||||
|
@ -516,6 +516,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
|
||||
// Add in the index
|
||||
addptr(result, tmp);
|
||||
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
// The resulting oop is null if the reference is not yet resolved.
|
||||
// It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
|
||||
}
|
||||
|
||||
// load cpool->resolved_klass_at(index)
|
||||
|
@ -836,7 +836,8 @@ void MacroAssembler::warn(const char* msg) {
|
||||
andq(rsp, -16); // align stack as required by push_CPU_state and call
|
||||
push_CPU_state(); // keeps alignment at 16 bytes
|
||||
lea(c_rarg0, ExternalAddress((address) msg));
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
|
||||
lea(rax, ExternalAddress(CAST_FROM_FN_PTR(address, warning)));
|
||||
call(rax);
|
||||
pop_CPU_state();
|
||||
mov(rsp, rbp);
|
||||
pop(rbp);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -589,7 +589,7 @@ void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
|
||||
|
||||
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
|
||||
if (!TraceMethodHandles) return;
|
||||
BLOCK_COMMENT("trace_method_handle {");
|
||||
BLOCK_COMMENT(err_msg("trace_method_handle %s {", adaptername));
|
||||
__ enter();
|
||||
__ andptr(rsp, -16); // align stack if needed for FPU state
|
||||
__ pusha();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -145,7 +145,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
|
||||
// We assume caller has already has return address slot on the stack
|
||||
// We push epb twice in this sequence because we want the real rbp,
|
||||
// to be under the return like a normal enter and we want to use pusha
|
||||
// We push by hand instead of pusing push
|
||||
// We push by hand instead of using push.
|
||||
__ enter();
|
||||
__ pusha();
|
||||
__ pushf();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -679,10 +679,28 @@ class StubGenerator: public StubCodeGenerator {
|
||||
case BarrierSet::G1SATBCTLogging:
|
||||
// With G1, don't generate the call if we statically know that the target in uninitialized
|
||||
if (!uninitialized_target) {
|
||||
Register thread = rax;
|
||||
Label filtered;
|
||||
__ push(thread);
|
||||
__ get_thread(thread);
|
||||
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
SATBMarkQueue::byte_offset_of_active()));
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ cmpl(in_progress, 0);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ cmpb(in_progress, 0);
|
||||
}
|
||||
__ pop(thread);
|
||||
__ jcc(Assembler::equal, filtered);
|
||||
|
||||
__ pusha(); // push registers
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
|
||||
start, count);
|
||||
__ popa();
|
||||
|
||||
__ bind(filtered);
|
||||
}
|
||||
break;
|
||||
case BarrierSet::CardTableForRS:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1201,6 +1201,18 @@ class StubGenerator: public StubCodeGenerator {
|
||||
case BarrierSet::G1SATBCTLogging:
|
||||
// With G1, don't generate the call if we statically know that the target in uninitialized
|
||||
if (!dest_uninitialized) {
|
||||
Label filtered;
|
||||
Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
||||
SATBMarkQueue::byte_offset_of_active()));
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ cmpl(in_progress, 0);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ cmpb(in_progress, 0);
|
||||
}
|
||||
__ jcc(Assembler::equal, filtered);
|
||||
|
||||
__ pusha(); // push registers
|
||||
if (count == c_rarg0) {
|
||||
if (addr == c_rarg1) {
|
||||
@ -1216,6 +1228,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
|
||||
__ popa();
|
||||
|
||||
__ bind(filtered);
|
||||
}
|
||||
break;
|
||||
case BarrierSet::CardTableForRS:
|
||||
|
@ -419,7 +419,7 @@ void TemplateTable::sipush() {
|
||||
void TemplateTable::ldc(bool wide) {
|
||||
transition(vtos, vtos);
|
||||
Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
|
||||
Label call_ldc, notFloat, notClass, Done;
|
||||
Label call_ldc, notFloat, notClass, notInt, Done;
|
||||
|
||||
if (wide) {
|
||||
__ get_unsigned_2_byte_index_at_bcp(rbx, 1);
|
||||
@ -465,19 +465,18 @@ void TemplateTable::ldc(bool wide) {
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notFloat);
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label L;
|
||||
__ cmpl(rdx, JVM_CONSTANT_Integer);
|
||||
__ jcc(Assembler::equal, L);
|
||||
// String and Object are rewritten to fast_aldc
|
||||
__ stop("unexpected tag type in ldc");
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
// itos JVM_CONSTANT_Integer only
|
||||
__ cmpl(rdx, JVM_CONSTANT_Integer);
|
||||
__ jccb(Assembler::notEqual, notInt);
|
||||
|
||||
// itos
|
||||
__ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
|
||||
__ push(itos);
|
||||
__ jmp(Done);
|
||||
|
||||
// assume the tag is for condy; if not, the VM runtime will tell us
|
||||
__ bind(notInt);
|
||||
condy_helper(Done);
|
||||
|
||||
__ bind(Done);
|
||||
}
|
||||
|
||||
@ -487,6 +486,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
|
||||
Register result = rax;
|
||||
Register tmp = rdx;
|
||||
Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
|
||||
int index_size = wide ? sizeof(u2) : sizeof(u1);
|
||||
|
||||
Label resolved;
|
||||
@ -496,17 +496,28 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
assert_different_registers(result, tmp);
|
||||
__ get_cache_index_at_bcp(tmp, 1, index_size);
|
||||
__ load_resolved_reference_at_index(result, tmp);
|
||||
__ testl(result, result);
|
||||
__ testptr(result, result);
|
||||
__ jcc(Assembler::notZero, resolved);
|
||||
|
||||
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
|
||||
|
||||
// first time invocation - must resolve first
|
||||
__ movl(tmp, (int)bytecode());
|
||||
__ call_VM(result, entry, tmp);
|
||||
|
||||
__ movl(rarg, (int)bytecode());
|
||||
__ call_VM(result, entry, rarg);
|
||||
__ bind(resolved);
|
||||
|
||||
{ // Check for the null sentinel.
|
||||
// If we just called the VM, that already did the mapping for us,
|
||||
// but it's harmless to retry.
|
||||
Label notNull;
|
||||
ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
|
||||
__ movptr(tmp, null_sentinel);
|
||||
__ cmpptr(tmp, result);
|
||||
__ jccb(Assembler::notEqual, notNull);
|
||||
__ xorptr(result, result); // NULL object reference
|
||||
__ bind(notNull);
|
||||
}
|
||||
|
||||
if (VerifyOops) {
|
||||
__ verify_oop(result);
|
||||
}
|
||||
@ -514,7 +525,7 @@ void TemplateTable::fast_aldc(bool wide) {
|
||||
|
||||
void TemplateTable::ldc2_w() {
|
||||
transition(vtos, vtos);
|
||||
Label Long, Done;
|
||||
Label notDouble, notLong, Done;
|
||||
__ get_unsigned_2_byte_index_at_bcp(rbx, 1);
|
||||
|
||||
__ get_cpool_and_tags(rcx, rax);
|
||||
@ -522,25 +533,143 @@ void TemplateTable::ldc2_w() {
|
||||
const int tags_offset = Array<u1>::base_offset_in_bytes();
|
||||
|
||||
// get type
|
||||
__ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
|
||||
JVM_CONSTANT_Double);
|
||||
__ jccb(Assembler::notEqual, Long);
|
||||
__ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
|
||||
__ cmpl(rdx, JVM_CONSTANT_Double);
|
||||
__ jccb(Assembler::notEqual, notDouble);
|
||||
|
||||
// dtos
|
||||
__ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
|
||||
__ push(dtos);
|
||||
|
||||
__ jmpb(Done);
|
||||
__ bind(Long);
|
||||
__ jmp(Done);
|
||||
__ bind(notDouble);
|
||||
__ cmpl(rdx, JVM_CONSTANT_Long);
|
||||
__ jccb(Assembler::notEqual, notLong);
|
||||
|
||||
// ltos
|
||||
__ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
|
||||
NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
|
||||
__ push(ltos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notLong);
|
||||
condy_helper(Done);
|
||||
|
||||
__ bind(Done);
|
||||
}
|
||||
|
||||
void TemplateTable::condy_helper(Label& Done) {
|
||||
const Register obj = rax;
|
||||
const Register off = rbx;
|
||||
const Register flags = rcx;
|
||||
const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
|
||||
__ movl(rarg, (int)bytecode());
|
||||
call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
|
||||
#ifndef _LP64
|
||||
// borrow rdi from locals
|
||||
__ get_thread(rdi);
|
||||
__ get_vm_result_2(flags, rdi);
|
||||
__ restore_locals();
|
||||
#else
|
||||
__ get_vm_result_2(flags, r15_thread);
|
||||
#endif
|
||||
// VMr = obj = base address to find primitive value to push
|
||||
// VMr2 = flags = (tos, off) using format of CPCE::_flags
|
||||
__ movl(off, flags);
|
||||
__ andl(off, ConstantPoolCacheEntry::field_index_mask);
|
||||
const Address field(obj, off, Address::times_1, 0*wordSize);
|
||||
|
||||
// What sort of thing are we loading?
|
||||
__ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
|
||||
__ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
|
||||
|
||||
switch (bytecode()) {
|
||||
case Bytecodes::_ldc:
|
||||
case Bytecodes::_ldc_w:
|
||||
{
|
||||
// tos in (itos, ftos, stos, btos, ctos, ztos)
|
||||
Label notInt, notFloat, notShort, notByte, notChar, notBool;
|
||||
__ cmpl(flags, itos);
|
||||
__ jcc(Assembler::notEqual, notInt);
|
||||
// itos
|
||||
__ movl(rax, field);
|
||||
__ push(itos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notInt);
|
||||
__ cmpl(flags, ftos);
|
||||
__ jcc(Assembler::notEqual, notFloat);
|
||||
// ftos
|
||||
__ load_float(field);
|
||||
__ push(ftos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notFloat);
|
||||
__ cmpl(flags, stos);
|
||||
__ jcc(Assembler::notEqual, notShort);
|
||||
// stos
|
||||
__ load_signed_short(rax, field);
|
||||
__ push(stos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notShort);
|
||||
__ cmpl(flags, btos);
|
||||
__ jcc(Assembler::notEqual, notByte);
|
||||
// btos
|
||||
__ load_signed_byte(rax, field);
|
||||
__ push(btos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notByte);
|
||||
__ cmpl(flags, ctos);
|
||||
__ jcc(Assembler::notEqual, notChar);
|
||||
// ctos
|
||||
__ load_unsigned_short(rax, field);
|
||||
__ push(ctos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notChar);
|
||||
__ cmpl(flags, ztos);
|
||||
__ jcc(Assembler::notEqual, notBool);
|
||||
// ztos
|
||||
__ load_signed_byte(rax, field);
|
||||
__ push(ztos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notBool);
|
||||
break;
|
||||
}
|
||||
|
||||
case Bytecodes::_ldc2_w:
|
||||
{
|
||||
Label notLong, notDouble;
|
||||
__ cmpl(flags, ltos);
|
||||
__ jcc(Assembler::notEqual, notLong);
|
||||
// ltos
|
||||
__ movptr(rax, field);
|
||||
NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
|
||||
__ push(ltos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notLong);
|
||||
__ cmpl(flags, dtos);
|
||||
__ jcc(Assembler::notEqual, notDouble);
|
||||
// dtos
|
||||
__ load_double(field);
|
||||
__ push(dtos);
|
||||
__ jmp(Done);
|
||||
|
||||
__ bind(notDouble);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
__ stop("bad ldc/condy");
|
||||
}
|
||||
|
||||
void TemplateTable::locals_index(Register reg, int offset) {
|
||||
__ load_unsigned_byte(reg, at_bcp(offset));
|
||||
__ negptr(reg);
|
||||
|
@ -1,5 +1,5 @@
|
||||
//
|
||||
// Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,7 +27,7 @@
|
||||
//----------REGISTER DEFINITION BLOCK------------------------------------------
|
||||
// This information is used by the matcher and the register allocator to
|
||||
// describe individual registers and classes of registers within the target
|
||||
// archtecture.
|
||||
// architecture.
|
||||
|
||||
register %{
|
||||
//----------Architecture Description Register Definitions----------------------
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,7 +26,6 @@
|
||||
#include "misc_aix.hpp"
|
||||
|
||||
#include <dlfcn.h>
|
||||
#include <sys/systemcfg.h>
|
||||
|
||||
// Handle to the libperfstat.
|
||||
static void* g_libhandle = NULL;
|
||||
@ -158,17 +157,6 @@ cid_t libperfstat::wpar_getcid() {
|
||||
|
||||
//////////////////// convenience functions, release-independent /////////////////////////////
|
||||
|
||||
// Excerpts from systemcfg.h definitions newer than AIX 5.3 (our oldest build platform)
|
||||
|
||||
#define PV_6 0x100000 /* Power PC 6 */
|
||||
#define PV_6_1 0x100001 /* Power PC 6 DD1.x */
|
||||
#define PV_7 0x200000 /* Power PC 7 */
|
||||
#define PV_5_Compat 0x0F8000 /* Power PC 5 */
|
||||
#define PV_6_Compat 0x108000 /* Power PC 6 */
|
||||
#define PV_7_Compat 0x208000 /* Power PC 7 */
|
||||
#define PV_8 0x300000 /* Power PC 8 */
|
||||
#define PV_8_Compat 0x308000 /* Power PC 8 */
|
||||
|
||||
|
||||
// Retrieve global cpu information.
|
||||
bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
|
||||
@ -191,7 +179,7 @@ bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
|
||||
}
|
||||
|
||||
// Global cpu information.
|
||||
strcpy (pci->description, psct.description);
|
||||
strcpy(pci->description, psct.description);
|
||||
pci->processorHZ = psct.processorHZ;
|
||||
pci->ncpus = psct.ncpus;
|
||||
for (int i = 0; i < 3; i++) {
|
||||
@ -203,45 +191,6 @@ bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
|
||||
pci->idle_clock_ticks = psct.idle;
|
||||
pci->wait_clock_ticks = psct.wait;
|
||||
|
||||
// Get the processor version from _system_configuration.
|
||||
switch (_system_configuration.version) {
|
||||
case PV_8:
|
||||
strcpy(pci->version, "Power PC 8");
|
||||
break;
|
||||
case PV_7:
|
||||
strcpy(pci->version, "Power PC 7");
|
||||
break;
|
||||
case PV_6_1:
|
||||
strcpy(pci->version, "Power PC 6 DD1.x");
|
||||
break;
|
||||
case PV_6:
|
||||
strcpy(pci->version, "Power PC 6");
|
||||
break;
|
||||
case PV_5:
|
||||
strcpy(pci->version, "Power PC 5");
|
||||
break;
|
||||
case PV_5_2:
|
||||
strcpy(pci->version, "Power PC 5_2");
|
||||
break;
|
||||
case PV_5_3:
|
||||
strcpy(pci->version, "Power PC 5_3");
|
||||
break;
|
||||
case PV_5_Compat:
|
||||
strcpy(pci->version, "PV_5_Compat");
|
||||
break;
|
||||
case PV_6_Compat:
|
||||
strcpy(pci->version, "PV_6_Compat");
|
||||
break;
|
||||
case PV_7_Compat:
|
||||
strcpy(pci->version, "PV_7_Compat");
|
||||
break;
|
||||
case PV_8_Compat:
|
||||
strcpy(pci->version, "PV_8_Compat");
|
||||
break;
|
||||
default:
|
||||
strcpy(pci->version, "unknown");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -942,7 +942,6 @@ public:
|
||||
int ncpus; // number of active logical processors
|
||||
double loadavg[3]; // (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes.
|
||||
// To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
|
||||
char version[20]; // processor version from _system_configuration (sys/systemcfg.h)
|
||||
unsigned long long user_clock_ticks; // raw total number of clock ticks spent in user mode
|
||||
unsigned long long sys_clock_ticks; // raw total number of clock ticks spent in system mode
|
||||
unsigned long long idle_clock_ticks; // raw total number of clock ticks spent idle
|
||||
@ -965,7 +964,6 @@ public:
|
||||
static bool get_partitioninfo(partitioninfo_t* ppi);
|
||||
static bool get_cpuinfo(cpuinfo_t* pci);
|
||||
static bool get_wparinfo(wparinfo_t* pwi);
|
||||
|
||||
};
|
||||
|
||||
#endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
|
||||
|
@ -117,7 +117,7 @@ int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
|
||||
#if !defined(_AIXVERSION_610)
|
||||
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
|
||||
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
|
||||
extern "C" int getargs (procsinfo*, int, char*, int);
|
||||
extern "C" int getargs(procsinfo*, int, char*, int);
|
||||
#endif
|
||||
|
||||
#define MAX_PATH (2 * K)
|
||||
@ -130,6 +130,32 @@ extern "C" int getargs (procsinfo*, int, char*, int);
|
||||
#define ERROR_MP_VMGETINFO_FAILED 102
|
||||
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
|
||||
|
||||
// excerpts from systemcfg.h that might be missing on older os levels
|
||||
#ifndef PV_5_Compat
|
||||
#define PV_5_Compat 0x0F8000 /* Power PC 5 */
|
||||
#endif
|
||||
#ifndef PV_6
|
||||
#define PV_6 0x100000 /* Power PC 6 */
|
||||
#endif
|
||||
#ifndef PV_6_1
|
||||
#define PV_6_1 0x100001 /* Power PC 6 DD1.x */
|
||||
#endif
|
||||
#ifndef PV_6_Compat
|
||||
#define PV_6_Compat 0x108000 /* Power PC 6 */
|
||||
#endif
|
||||
#ifndef PV_7
|
||||
#define PV_7 0x200000 /* Power PC 7 */
|
||||
#endif
|
||||
#ifndef PV_7_Compat
|
||||
#define PV_7_Compat 0x208000 /* Power PC 7 */
|
||||
#endif
|
||||
#ifndef PV_8
|
||||
#define PV_8 0x300000 /* Power PC 8 */
|
||||
#endif
|
||||
#ifndef PV_8_Compat
|
||||
#define PV_8_Compat 0x308000 /* Power PC 8 */
|
||||
#endif
|
||||
|
||||
static address resolve_function_descriptor_to_code_pointer(address p);
|
||||
|
||||
static void vmembk_print_on(outputStream* os);
|
||||
@ -1443,17 +1469,48 @@ void os::print_memory_info(outputStream* st) {
|
||||
|
||||
// Get a string for the cpuinfo that is a summary of the cpu type
|
||||
void os::get_summary_cpu_info(char* buf, size_t buflen) {
|
||||
// This looks good
|
||||
libperfstat::cpuinfo_t ci;
|
||||
if (libperfstat::get_cpuinfo(&ci)) {
|
||||
strncpy(buf, ci.version, buflen);
|
||||
} else {
|
||||
strncpy(buf, "AIX", buflen);
|
||||
// read _system_configuration.version
|
||||
switch (_system_configuration.version) {
|
||||
case PV_8:
|
||||
strncpy(buf, "Power PC 8", buflen);
|
||||
break;
|
||||
case PV_7:
|
||||
strncpy(buf, "Power PC 7", buflen);
|
||||
break;
|
||||
case PV_6_1:
|
||||
strncpy(buf, "Power PC 6 DD1.x", buflen);
|
||||
break;
|
||||
case PV_6:
|
||||
strncpy(buf, "Power PC 6", buflen);
|
||||
break;
|
||||
case PV_5:
|
||||
strncpy(buf, "Power PC 5", buflen);
|
||||
break;
|
||||
case PV_5_2:
|
||||
strncpy(buf, "Power PC 5_2", buflen);
|
||||
break;
|
||||
case PV_5_3:
|
||||
strncpy(buf, "Power PC 5_3", buflen);
|
||||
break;
|
||||
case PV_5_Compat:
|
||||
strncpy(buf, "PV_5_Compat", buflen);
|
||||
break;
|
||||
case PV_6_Compat:
|
||||
strncpy(buf, "PV_6_Compat", buflen);
|
||||
break;
|
||||
case PV_7_Compat:
|
||||
strncpy(buf, "PV_7_Compat", buflen);
|
||||
break;
|
||||
case PV_8_Compat:
|
||||
strncpy(buf, "PV_8_Compat", buflen);
|
||||
break;
|
||||
default:
|
||||
strncpy(buf, "unknown", buflen);
|
||||
}
|
||||
}
|
||||
|
||||
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
|
||||
// Nothing to do beyond what os::print_cpu_info() does.
|
||||
// Nothing to do beyond of what os::print_cpu_info() does.
|
||||
}
|
||||
|
||||
static void print_signal_handler(outputStream* st, int sig,
|
||||
@ -4242,48 +4299,6 @@ int os::fork_and_exec(char* cmd) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// is_headless_jre()
|
||||
//
|
||||
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
|
||||
// in order to report if we are running in a headless jre.
|
||||
//
|
||||
// Since JDK8 xawt/libmawt.so is moved into the same directory
|
||||
// as libawt.so, and renamed libawt_xawt.so
|
||||
bool os::is_headless_jre() {
|
||||
struct stat statbuf;
|
||||
char buf[MAXPATHLEN];
|
||||
char libmawtpath[MAXPATHLEN];
|
||||
const char *xawtstr = "/xawt/libmawt.so";
|
||||
const char *new_xawtstr = "/libawt_xawt.so";
|
||||
|
||||
char *p;
|
||||
|
||||
// Get path to libjvm.so
|
||||
os::jvm_path(buf, sizeof(buf));
|
||||
|
||||
// Get rid of libjvm.so
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) return false;
|
||||
else *p = '\0';
|
||||
|
||||
// Get rid of client or server
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) return false;
|
||||
else *p = '\0';
|
||||
|
||||
// check xawt/libmawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
// check libawt_xawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, new_xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Get the default path to the core file
|
||||
// Returns the length of the string
|
||||
int os::get_core_path(char* buffer, size_t bufferSize) {
|
||||
|
@ -3894,59 +3894,6 @@ int os::fork_and_exec(char* cmd) {
|
||||
}
|
||||
}
|
||||
|
||||
// is_headless_jre()
|
||||
//
|
||||
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
|
||||
// in order to report if we are running in a headless jre
|
||||
//
|
||||
// Since JDK8 xawt/libmawt.so was moved into the same directory
|
||||
// as libawt.so, and renamed libawt_xawt.so
|
||||
//
|
||||
bool os::is_headless_jre() {
|
||||
#ifdef __APPLE__
|
||||
// We no longer build headless-only on Mac OS X
|
||||
return false;
|
||||
#else
|
||||
struct stat statbuf;
|
||||
char buf[MAXPATHLEN];
|
||||
char libmawtpath[MAXPATHLEN];
|
||||
const char *xawtstr = "/xawt/libmawt" JNI_LIB_SUFFIX;
|
||||
const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
|
||||
char *p;
|
||||
|
||||
// Get path to libjvm.so
|
||||
os::jvm_path(buf, sizeof(buf));
|
||||
|
||||
// Get rid of libjvm.so
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// Get rid of client or server
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// check xawt/libmawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
// check libawt_xawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, new_xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Get the default path to the core file
|
||||
// Returns the length of the string
|
||||
int os::get_core_path(char* buffer, size_t bufferSize) {
|
||||
|
@ -5690,54 +5690,6 @@ int os::fork_and_exec(char* cmd) {
|
||||
}
|
||||
}
|
||||
|
||||
// is_headless_jre()
|
||||
//
|
||||
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
|
||||
// in order to report if we are running in a headless jre
|
||||
//
|
||||
// Since JDK8 xawt/libmawt.so was moved into the same directory
|
||||
// as libawt.so, and renamed libawt_xawt.so
|
||||
//
|
||||
bool os::is_headless_jre() {
|
||||
struct stat statbuf;
|
||||
char buf[MAXPATHLEN];
|
||||
char libmawtpath[MAXPATHLEN];
|
||||
const char *xawtstr = "/xawt/libmawt.so";
|
||||
const char *new_xawtstr = "/libawt_xawt.so";
|
||||
char *p;
|
||||
|
||||
// Get path to libjvm.so
|
||||
os::jvm_path(buf, sizeof(buf));
|
||||
|
||||
// Get rid of libjvm.so
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// Get rid of client or server
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// check xawt/libmawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
// check libawt_xawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, new_xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Get the default path to the core file
|
||||
// Returns the length of the string
|
||||
int os::get_core_path(char* buffer, size_t bufferSize) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -147,13 +147,26 @@ static void save_memory_to_file(char* addr, size_t size) {
|
||||
// which is always a local file system and is sometimes a RAM based file
|
||||
// system.
|
||||
|
||||
|
||||
// return the user specific temporary directory name.
|
||||
//
|
||||
// If containerized process, get dirname of
|
||||
// /proc/{vmid}/root/tmp/{PERFDATA_NAME_user}
|
||||
// otherwise /tmp/{PERFDATA_NAME_user}
|
||||
//
|
||||
// the caller is expected to free the allocated memory.
|
||||
//
|
||||
static char* get_user_tmp_dir(const char* user) {
|
||||
#define TMP_BUFFER_LEN (4+22)
|
||||
static char* get_user_tmp_dir(const char* user, int vmid, int nspid) {
|
||||
char buffer[TMP_BUFFER_LEN];
|
||||
char* tmpdir = (char *)os::get_temp_directory();
|
||||
assert(strlen(tmpdir) == 4, "No longer using /tmp - update buffer size");
|
||||
|
||||
if (nspid != -1) {
|
||||
jio_snprintf(buffer, TMP_BUFFER_LEN, "/proc/%d/root%s", vmid, tmpdir);
|
||||
tmpdir = buffer;
|
||||
}
|
||||
|
||||
const char* tmpdir = os::get_temp_directory();
|
||||
const char* perfdir = PERFDATA_NAME;
|
||||
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
|
||||
char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
|
||||
@ -502,7 +515,10 @@ static char* get_user_name(uid_t uid) {
|
||||
//
|
||||
// the caller is expected to free the allocated memory.
|
||||
//
|
||||
static char* get_user_name_slow(int vmid, TRAPS) {
|
||||
// If nspid != -1, look in /proc/{vmid}/root/tmp for directories
|
||||
// containing nspid, otherwise just look for vmid in /tmp
|
||||
//
|
||||
static char* get_user_name_slow(int vmid, int nspid, TRAPS) {
|
||||
|
||||
// short circuit the directory search if the process doesn't even exist.
|
||||
if (kill(vmid, 0) == OS_ERR) {
|
||||
@ -518,8 +534,19 @@ static char* get_user_name_slow(int vmid, TRAPS) {
|
||||
// directory search
|
||||
char* oldest_user = NULL;
|
||||
time_t oldest_ctime = 0;
|
||||
char buffer[TMP_BUFFER_LEN];
|
||||
int searchpid;
|
||||
char* tmpdirname = (char *)os::get_temp_directory();
|
||||
assert(strlen(tmpdirname) == 4, "No longer using /tmp - update buffer size");
|
||||
|
||||
const char* tmpdirname = os::get_temp_directory();
|
||||
if (nspid == -1) {
|
||||
searchpid = vmid;
|
||||
}
|
||||
else {
|
||||
jio_snprintf(buffer, MAXPATHLEN, "/proc/%d/root%s", vmid, tmpdirname);
|
||||
tmpdirname = buffer;
|
||||
searchpid = nspid;
|
||||
}
|
||||
|
||||
// open the temp directory
|
||||
DIR* tmpdirp = os::opendir(tmpdirname);
|
||||
@ -530,7 +557,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
|
||||
}
|
||||
|
||||
// for each entry in the directory that matches the pattern hsperfdata_*,
|
||||
// open the directory and check if the file for the given vmid exists.
|
||||
// open the directory and check if the file for the given vmid or nspid exists.
|
||||
// The file with the expected name and the latest creation date is used
|
||||
// to determine the user name for the process id.
|
||||
//
|
||||
@ -575,7 +602,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
|
||||
errno = 0;
|
||||
while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
|
||||
|
||||
if (filename_to_pid(udentry->d_name) == vmid) {
|
||||
if (filename_to_pid(udentry->d_name) == searchpid) {
|
||||
struct stat statbuf;
|
||||
int result;
|
||||
|
||||
@ -626,10 +653,51 @@ static char* get_user_name_slow(int vmid, TRAPS) {
|
||||
return(oldest_user);
|
||||
}
|
||||
|
||||
// Determine if the vmid is the parent pid
|
||||
// for a child in a PID namespace.
|
||||
// return the namespace pid if so, otherwise -1
|
||||
static int get_namespace_pid(int vmid) {
|
||||
char fname[24];
|
||||
int retpid = -1;
|
||||
|
||||
snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);
|
||||
FILE *fp = fopen(fname, "r");
|
||||
|
||||
if (fp) {
|
||||
int pid, nspid;
|
||||
int ret;
|
||||
while (!feof(fp)) {
|
||||
ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);
|
||||
if (ret == 1) {
|
||||
break;
|
||||
}
|
||||
if (ret == 2) {
|
||||
retpid = nspid;
|
||||
break;
|
||||
}
|
||||
for (;;) {
|
||||
int ch = fgetc(fp);
|
||||
if (ch == EOF || ch == (int)'\n') break;
|
||||
}
|
||||
}
|
||||
fclose(fp);
|
||||
}
|
||||
return retpid;
|
||||
}
|
||||
|
||||
// return the name of the user that owns the JVM indicated by the given vmid.
|
||||
//
|
||||
static char* get_user_name(int vmid, TRAPS) {
|
||||
return get_user_name_slow(vmid, THREAD);
|
||||
static char* get_user_name(int vmid, int *nspid, TRAPS) {
|
||||
char *result = get_user_name_slow(vmid, *nspid, THREAD);
|
||||
|
||||
// If we are examining a container process without PID namespaces enabled
|
||||
// we need to use /proc/{pid}/root/tmp to find hsperfdata files.
|
||||
if (result == NULL) {
|
||||
result = get_user_name_slow(vmid, vmid, THREAD);
|
||||
// Enable nspid logic going forward
|
||||
if (result != NULL) *nspid = vmid;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// return the file name of the backing store file for the named
|
||||
@ -637,13 +705,15 @@ static char* get_user_name(int vmid, TRAPS) {
|
||||
//
|
||||
// the caller is expected to free the allocated memory.
|
||||
//
|
||||
static char* get_sharedmem_filename(const char* dirname, int vmid) {
|
||||
static char* get_sharedmem_filename(const char* dirname, int vmid, int nspid) {
|
||||
|
||||
int pid = (nspid == -1) ? vmid : nspid;
|
||||
|
||||
// add 2 for the file separator and a null terminator.
|
||||
size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
|
||||
|
||||
char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
|
||||
snprintf(name, nbytes, "%s/%d", dirname, vmid);
|
||||
snprintf(name, nbytes, "%s/%d", dirname, pid);
|
||||
|
||||
return name;
|
||||
}
|
||||
@ -940,8 +1010,8 @@ static char* mmap_create_shared(size_t size) {
|
||||
if (user_name == NULL)
|
||||
return NULL;
|
||||
|
||||
char* dirname = get_user_tmp_dir(user_name);
|
||||
char* filename = get_sharedmem_filename(dirname, vmid);
|
||||
char* dirname = get_user_tmp_dir(user_name, vmid, -1);
|
||||
char* filename = get_sharedmem_filename(dirname, vmid, -1);
|
||||
|
||||
// get the short filename
|
||||
char* short_filename = strrchr(filename, '/');
|
||||
@ -1088,8 +1158,11 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
||||
"Illegal access mode");
|
||||
}
|
||||
|
||||
// determine if vmid is for a containerized process
|
||||
int nspid = get_namespace_pid(vmid);
|
||||
|
||||
if (user == NULL || strlen(user) == 0) {
|
||||
luser = get_user_name(vmid, CHECK);
|
||||
luser = get_user_name(vmid, &nspid, CHECK);
|
||||
}
|
||||
else {
|
||||
luser = user;
|
||||
@ -1100,7 +1173,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
||||
"Could not map vmid to user Name");
|
||||
}
|
||||
|
||||
char* dirname = get_user_tmp_dir(luser);
|
||||
char* dirname = get_user_tmp_dir(luser, vmid, nspid);
|
||||
|
||||
// since we don't follow symbolic links when creating the backing
|
||||
// store file, we don't follow them when attaching either.
|
||||
@ -1114,7 +1187,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
|
||||
"Process not found");
|
||||
}
|
||||
|
||||
char* filename = get_sharedmem_filename(dirname, vmid);
|
||||
char* filename = get_sharedmem_filename(dirname, vmid, nspid);
|
||||
|
||||
// copy heap memory to resource memory. the open_sharedmem_file
|
||||
// method below need to use the filename, but could throw an
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,7 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/filemap.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
@ -153,8 +153,7 @@ void VMError::check_failing_cds_access(outputStream* st, const void* siginfo) {
|
||||
if (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) {
|
||||
const void* const fault_addr = si->si_addr;
|
||||
if (fault_addr != NULL) {
|
||||
FileMapInfo* const mapinfo = FileMapInfo::current_info();
|
||||
if (mapinfo->is_in_shared_space(fault_addr)) {
|
||||
if (MetaspaceShared::is_in_shared_metaspace(fault_addr)) {
|
||||
st->print("Error accessing class data sharing archive. "
|
||||
"Mapped file inaccessible during execution, possible disk/network problem.");
|
||||
}
|
||||
|
@ -5366,54 +5366,6 @@ int os::fork_and_exec(char* cmd) {
|
||||
}
|
||||
}
|
||||
|
||||
// is_headless_jre()
|
||||
//
|
||||
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
|
||||
// in order to report if we are running in a headless jre
|
||||
//
|
||||
// Since JDK8 xawt/libmawt.so was moved into the same directory
|
||||
// as libawt.so, and renamed libawt_xawt.so
|
||||
//
|
||||
bool os::is_headless_jre() {
|
||||
struct stat statbuf;
|
||||
char buf[MAXPATHLEN];
|
||||
char libmawtpath[MAXPATHLEN];
|
||||
const char *xawtstr = "/xawt/libmawt.so";
|
||||
const char *new_xawtstr = "/libawt_xawt.so";
|
||||
char *p;
|
||||
|
||||
// Get path to libjvm.so
|
||||
os::jvm_path(buf, sizeof(buf));
|
||||
|
||||
// Get rid of libjvm.so
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// Get rid of client or server
|
||||
p = strrchr(buf, '/');
|
||||
if (p == NULL) {
|
||||
return false;
|
||||
} else {
|
||||
*p = '\0';
|
||||
}
|
||||
|
||||
// check xawt/libmawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
// check libawt_xawt.so
|
||||
strcpy(libmawtpath, buf);
|
||||
strcat(libmawtpath, new_xawtstr);
|
||||
if (::stat(libmawtpath, &statbuf) == 0) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
|
||||
size_t res;
|
||||
RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
|
||||
|
@ -5262,9 +5262,6 @@ LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
|
||||
// We don't build a headless jre for Windows
|
||||
bool os::is_headless_jre() { return false; }
|
||||
|
||||
static jint initSock() {
|
||||
WSADATA wsadata;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,7 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/filemap.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
@ -58,8 +58,7 @@ void VMError::check_failing_cds_access(outputStream* st, const void* siginfo) {
|
||||
er->NumberParameters >= 2) {
|
||||
const void* const fault_addr = (const void*) er->ExceptionInformation[1];
|
||||
if (fault_addr != NULL) {
|
||||
FileMapInfo* const mapinfo = FileMapInfo::current_info();
|
||||
if (mapinfo->is_in_shared_space(fault_addr)) {
|
||||
if (MetaspaceShared::is_in_shared_metaspace(fault_addr)) {
|
||||
st->print("Error accessing class data sharing archive. "
|
||||
"Mapped file inaccessible during execution, possible disk/network problem.");
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -111,6 +111,10 @@ void os::Aix::ucontext_set_pc(ucontext_t* uc, address new_pc) {
|
||||
uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
|
||||
}
|
||||
|
||||
static address ucontext_get_lr(const ucontext_t * uc) {
|
||||
return (address)uc->uc_mcontext.jmp_context.lr;
|
||||
}
|
||||
|
||||
ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
|
||||
intptr_t** ret_sp, intptr_t** ret_fp) {
|
||||
|
||||
@ -167,7 +171,8 @@ bool os::Aix::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* u
|
||||
return false;
|
||||
} else {
|
||||
intptr_t* sp = os::Aix::ucontext_get_sp(uc);
|
||||
*fr = frame(sp, (address)*sp);
|
||||
address lr = ucontext_get_lr(uc);
|
||||
*fr = frame(sp, lr);
|
||||
if (!fr->is_java_frame()) {
|
||||
assert(fr->safe_for_sender(thread), "Safety check");
|
||||
assert(!fr->is_first_frame(), "Safety check");
|
||||
|
@ -279,11 +279,11 @@
|
||||
address os::current_stack_pointer() {
|
||||
#if defined(__clang__) || defined(__llvm__)
|
||||
register void *esp;
|
||||
__asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
|
||||
__asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
|
||||
return (address) esp;
|
||||
#elif defined(SPARC_WORKS)
|
||||
register void *esp;
|
||||
__asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
|
||||
__asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
|
||||
return (address) ((char*)esp + sizeof(long)*2);
|
||||
#else
|
||||
register void *esp __asm__ (SPELL_REG_SP);
|
||||
@ -415,7 +415,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
|
||||
intptr_t* _get_previous_fp() {
|
||||
#if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
|
||||
register intptr_t **ebp;
|
||||
__asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
|
||||
__asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
|
||||
#else
|
||||
register intptr_t **ebp __asm__ (SPELL_REG_FP);
|
||||
#endif
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -121,6 +121,10 @@ void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
|
||||
uc->uc_mcontext.regs->nip = (unsigned long)pc;
|
||||
}
|
||||
|
||||
static address ucontext_get_lr(const ucontext_t * uc) {
|
||||
return (address)uc->uc_mcontext.regs->link;
|
||||
}
|
||||
|
||||
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
|
||||
return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
|
||||
}
|
||||
@ -178,9 +182,9 @@ bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t*
|
||||
// the frame is complete.
|
||||
return false;
|
||||
} else {
|
||||
intptr_t* fp = os::Linux::ucontext_get_fp(uc);
|
||||
intptr_t* sp = os::Linux::ucontext_get_sp(uc);
|
||||
*fr = frame(sp, (address)*sp);
|
||||
address lr = ucontext_get_lr(uc);
|
||||
*fr = frame(sp, lr);
|
||||
if (!fr->is_java_frame()) {
|
||||
assert(fr->safe_for_sender(thread), "Safety check");
|
||||
assert(!fr->is_first_frame(), "Safety check");
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -108,6 +108,10 @@ void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
|
||||
uc->uc_mcontext.psw.addr = (unsigned long)pc;
|
||||
}
|
||||
|
||||
static address ucontext_get_lr(const ucontext_t * uc) {
|
||||
return (address)uc->uc_mcontext.gregs[14/*LINK*/];
|
||||
}
|
||||
|
||||
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
|
||||
return (intptr_t*)uc->uc_mcontext.gregs[15/*REG_SP*/];
|
||||
}
|
||||
@ -165,9 +169,9 @@ bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t*
|
||||
// the frame is complete.
|
||||
return false;
|
||||
} else {
|
||||
intptr_t* fp = os::Linux::ucontext_get_fp(uc);
|
||||
intptr_t* sp = os::Linux::ucontext_get_sp(uc);
|
||||
*fr = frame(sp, (address)*sp);
|
||||
address lr = ucontext_get_lr(uc);
|
||||
*fr = frame(sp, lr);
|
||||
if (!fr->is_java_frame()) {
|
||||
assert(fr->safe_for_sender(thread), "Safety check");
|
||||
assert(!fr->is_first_frame(), "Safety check");
|
||||
|
@ -874,6 +874,8 @@ void GraphBuilder::ScopeData::incr_num_returns() {
|
||||
void GraphBuilder::load_constant() {
|
||||
ciConstant con = stream()->get_constant();
|
||||
if (con.basic_type() == T_ILLEGAL) {
|
||||
// FIXME: an unresolved Dynamic constant can get here,
|
||||
// and that should not terminate the whole compilation.
|
||||
BAILOUT("could not resolve a constant");
|
||||
} else {
|
||||
ValueType* t = illegalType;
|
||||
@ -893,11 +895,19 @@ void GraphBuilder::load_constant() {
|
||||
ciObject* obj = con.as_object();
|
||||
if (!obj->is_loaded()
|
||||
|| (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
|
||||
// A Class, MethodType, MethodHandle, or String.
|
||||
// Unloaded condy nodes show up as T_ILLEGAL, above.
|
||||
patch_state = copy_state_before();
|
||||
t = new ObjectConstant(obj);
|
||||
} else {
|
||||
assert(obj->is_instance(), "must be java_mirror of klass");
|
||||
t = new InstanceConstant(obj->as_instance());
|
||||
// Might be a Class, MethodType, MethodHandle, or Dynamic constant
|
||||
// result, which might turn out to be an array.
|
||||
if (obj->is_null_object())
|
||||
t = objectNull;
|
||||
else if (obj->is_array())
|
||||
t = new ArrayConstant(obj->as_array());
|
||||
else
|
||||
t = new InstanceConstant(obj->as_instance());
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -584,8 +584,34 @@ ciConstant ciEnv::get_constant_by_index_impl(const constantPoolHandle& cpool,
|
||||
int index = pool_index;
|
||||
if (cache_index >= 0) {
|
||||
assert(index < 0, "only one kind of index at a time");
|
||||
index = cpool->object_to_cp_index(cache_index);
|
||||
oop obj = cpool->resolved_references()->obj_at(cache_index);
|
||||
if (obj != NULL) {
|
||||
if (obj == Universe::the_null_sentinel()) {
|
||||
return ciConstant(T_OBJECT, get_object(NULL));
|
||||
}
|
||||
BasicType bt = T_OBJECT;
|
||||
if (cpool->tag_at(index).is_dynamic_constant())
|
||||
bt = FieldType::basic_type(cpool->uncached_signature_ref_at(index));
|
||||
if (is_reference_type(bt)) {
|
||||
} else {
|
||||
// we have to unbox the primitive value
|
||||
if (!is_java_primitive(bt)) return ciConstant();
|
||||
jvalue value;
|
||||
BasicType bt2 = java_lang_boxing_object::get_value(obj, &value);
|
||||
assert(bt2 == bt, "");
|
||||
switch (bt2) {
|
||||
case T_DOUBLE: return ciConstant(value.d);
|
||||
case T_FLOAT: return ciConstant(value.f);
|
||||
case T_LONG: return ciConstant(value.j);
|
||||
case T_INT: return ciConstant(bt2, value.i);
|
||||
case T_SHORT: return ciConstant(bt2, value.s);
|
||||
case T_BYTE: return ciConstant(bt2, value.b);
|
||||
case T_CHAR: return ciConstant(bt2, value.c);
|
||||
case T_BOOLEAN: return ciConstant(bt2, value.z);
|
||||
default: return ciConstant();
|
||||
}
|
||||
}
|
||||
ciObject* ciobj = get_object(obj);
|
||||
if (ciobj->is_array()) {
|
||||
return ciConstant(T_ARRAY, ciobj);
|
||||
@ -594,7 +620,6 @@ ciConstant ciEnv::get_constant_by_index_impl(const constantPoolHandle& cpool,
|
||||
return ciConstant(T_OBJECT, ciobj);
|
||||
}
|
||||
}
|
||||
index = cpool->object_to_cp_index(cache_index);
|
||||
}
|
||||
constantTag tag = cpool->tag_at(index);
|
||||
if (tag.is_int()) {
|
||||
@ -650,6 +675,8 @@ ciConstant ciEnv::get_constant_by_index_impl(const constantPoolHandle& cpool,
|
||||
ciSymbol* signature = get_symbol(cpool->method_handle_signature_ref_at(index));
|
||||
ciObject* ciobj = get_unloaded_method_handle_constant(callee, name, signature, ref_kind);
|
||||
return ciConstant(T_OBJECT, ciobj);
|
||||
} else if (tag.is_dynamic_constant()) {
|
||||
return ciConstant();
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
return ciConstant();
|
||||
|
@ -721,6 +721,7 @@ class CompileReplay : public StackObj {
|
||||
case JVM_CONSTANT_Float:
|
||||
case JVM_CONSTANT_MethodHandle:
|
||||
case JVM_CONSTANT_MethodType:
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
if (tag != cp->tag_at(i).value()) {
|
||||
report_error("tag mismatch: wrong class files?");
|
||||
|
@ -254,7 +254,8 @@ ciConstant ciBytecodeStream::get_constant() {
|
||||
// constant.
|
||||
constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
|
||||
VM_ENTRY_MARK;
|
||||
return _method->get_Method()->constants()->tag_at(index);
|
||||
BasicType bt = _method->get_Method()->constants()->basic_type_for_constant_at(index);
|
||||
return constantTag::ofBasicType(bt);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
@ -204,6 +204,21 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s
|
||||
}
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_Dynamic : {
|
||||
if (_major_version < Verifier::DYNAMICCONSTANT_MAJOR_VERSION) {
|
||||
classfile_parse_error(
|
||||
"Class file version does not support constant tag %u in class file %s",
|
||||
tag, CHECK);
|
||||
}
|
||||
cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags
|
||||
const u2 bootstrap_specifier_index = cfs->get_u2_fast();
|
||||
const u2 name_and_type_index = cfs->get_u2_fast();
|
||||
if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) {
|
||||
_max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later
|
||||
}
|
||||
cp->dynamic_constant_at_put(index, bootstrap_specifier_index, name_and_type_index);
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_InvokeDynamic : {
|
||||
if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
|
||||
classfile_parse_error(
|
||||
@ -536,6 +551,21 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
|
||||
ref_index, CHECK);
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_Dynamic: {
|
||||
const int name_and_type_ref_index =
|
||||
cp->invoke_dynamic_name_and_type_ref_index_at(index);
|
||||
|
||||
check_property(valid_cp_range(name_and_type_ref_index, length) &&
|
||||
cp->tag_at(name_and_type_ref_index).is_name_and_type(),
|
||||
"Invalid constant pool index %u in class file %s",
|
||||
name_and_type_ref_index, CHECK);
|
||||
// bootstrap specifier index must be checked later,
|
||||
// when BootstrapMethods attr is available
|
||||
|
||||
// Mark the constant pool as having a CONSTANT_Dynamic_info structure
|
||||
cp->set_has_dynamic_constant();
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_InvokeDynamic: {
|
||||
const int name_and_type_ref_index =
|
||||
cp->invoke_dynamic_name_and_type_ref_index_at(index);
|
||||
@ -628,6 +658,27 @@ void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
|
||||
}
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_Dynamic: {
|
||||
const int name_and_type_ref_index =
|
||||
cp->name_and_type_ref_index_at(index);
|
||||
// already verified to be utf8
|
||||
const int name_ref_index =
|
||||
cp->name_ref_index_at(name_and_type_ref_index);
|
||||
// already verified to be utf8
|
||||
const int signature_ref_index =
|
||||
cp->signature_ref_index_at(name_and_type_ref_index);
|
||||
const Symbol* const name = cp->symbol_at(name_ref_index);
|
||||
const Symbol* const signature = cp->symbol_at(signature_ref_index);
|
||||
if (_need_verify) {
|
||||
// CONSTANT_Dynamic's name and signature are verified above, when iterating NameAndType_info.
|
||||
// Need only to be sure signature is non-zero length and the right type.
|
||||
if (signature->utf8_length() == 0 ||
|
||||
signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
|
||||
throwIllegalSignature("CONSTANT_Dynamic", name, signature, CHECK);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
case JVM_CONSTANT_Fieldref:
|
||||
case JVM_CONSTANT_Methodref:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1292,7 +1292,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
|
||||
// Remove entries in the dictionary of live class loader that have
|
||||
// initiated loading classes in a dead class loader.
|
||||
if (data->dictionary() != NULL) {
|
||||
data->dictionary()->do_unloading();
|
||||
data->dictionary()->do_unloading(is_alive_closure);
|
||||
}
|
||||
// Walk a ModuleEntry's reads, and a PackageEntry's exports
|
||||
// lists to determine if there are modules on those lists that are now
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -167,7 +167,7 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
|
||||
for (ProtectionDomainEntry* current = pd_set_acquire();
|
||||
current != NULL;
|
||||
current = current->next()) {
|
||||
if (current->protection_domain() == protection_domain) {
|
||||
if (current->object_no_keepalive() == protection_domain) {
|
||||
in_pd_set = true;
|
||||
break;
|
||||
}
|
||||
@ -187,7 +187,7 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
|
||||
for (ProtectionDomainEntry* current = pd_set_acquire();
|
||||
current != NULL;
|
||||
current = current->next()) {
|
||||
if (current->protection_domain() == protection_domain) return true;
|
||||
if (current->object_no_keepalive() == protection_domain) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -212,8 +212,44 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, Handle protection_
|
||||
}
|
||||
}
|
||||
|
||||
// During class loading we may have cached a protection domain that has
|
||||
// since been unreferenced, so this entry should be cleared.
|
||||
void Dictionary::clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe) {
|
||||
assert_locked_or_safepoint(SystemDictionary_lock);
|
||||
|
||||
void Dictionary::do_unloading() {
|
||||
ProtectionDomainEntry* current = probe->pd_set();
|
||||
ProtectionDomainEntry* prev = NULL;
|
||||
while (current != NULL) {
|
||||
if (!is_alive->do_object_b(current->object_no_keepalive())) {
|
||||
LogTarget(Debug, protectiondomain) lt;
|
||||
if (lt.is_enabled()) {
|
||||
ResourceMark rm;
|
||||
// Print out trace information
|
||||
LogStream ls(lt);
|
||||
ls.print_cr("PD in set is not alive:");
|
||||
ls.print("class loader: "); loader_data()->class_loader()->print_value_on(&ls);
|
||||
ls.print(" protection domain: "); current->object_no_keepalive()->print_value_on(&ls);
|
||||
ls.print(" loading: "); probe->instance_klass()->print_value_on(&ls);
|
||||
ls.cr();
|
||||
}
|
||||
if (probe->pd_set() == current) {
|
||||
probe->set_pd_set(current->next());
|
||||
} else {
|
||||
assert(prev != NULL, "should be set by alive entry");
|
||||
prev->set_next(current->next());
|
||||
}
|
||||
ProtectionDomainEntry* to_delete = current;
|
||||
current = current->next();
|
||||
delete to_delete;
|
||||
} else {
|
||||
prev = current;
|
||||
current = current->next();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Dictionary::do_unloading(BoolObjectClosure* is_alive) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
|
||||
|
||||
// The NULL class loader doesn't initiate loading classes from other class loaders
|
||||
@ -239,6 +275,8 @@ void Dictionary::do_unloading() {
|
||||
free_entry(probe);
|
||||
continue;
|
||||
}
|
||||
// Clean pd_set
|
||||
clean_cached_protection_domains(is_alive, probe);
|
||||
p = probe->next_addr();
|
||||
}
|
||||
}
|
||||
@ -412,6 +450,10 @@ void Dictionary::add_protection_domain(int index, unsigned int hash,
|
||||
|
||||
entry->add_protection_domain(this, protection_domain);
|
||||
|
||||
#ifdef ASSERT
|
||||
assert(loader_data() != ClassLoaderData::the_null_class_loader_data(), "doesn't make sense");
|
||||
#endif
|
||||
|
||||
assert(entry->contains_protection_domain(protection_domain()),
|
||||
"now protection domain should be present");
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -53,6 +53,8 @@ class Dictionary : public Hashtable<InstanceKlass*, mtClass> {
|
||||
|
||||
DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name);
|
||||
|
||||
void clean_cached_protection_domains(BoolObjectClosure* is_alive, DictionaryEntry* probe);
|
||||
|
||||
protected:
|
||||
static size_t entry_size();
|
||||
public:
|
||||
@ -84,7 +86,7 @@ public:
|
||||
void remove_classes_in_error_state();
|
||||
|
||||
// Unload classes whose defining loaders are unloaded
|
||||
void do_unloading();
|
||||
void do_unloading(BoolObjectClosure* is_alive);
|
||||
|
||||
// Protection domains
|
||||
InstanceKlass* find(unsigned int hash, Symbol* name, Handle protection_domain);
|
||||
@ -189,7 +191,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
|
||||
for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
|
||||
current != NULL;
|
||||
current = current->_next) {
|
||||
current->_pd_cache->protection_domain()->verify();
|
||||
current->_pd_cache->object_no_keepalive()->verify();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1702,7 +1702,7 @@ class BacktraceBuilder: public StackObj {
|
||||
method = mhandle();
|
||||
}
|
||||
|
||||
_methods->short_at_put(_index, method->orig_method_idnum());
|
||||
_methods->ushort_at_put(_index, method->orig_method_idnum());
|
||||
_bcis->int_at_put(_index, Backtrace::merge_bci_and_version(bci, method->constants()->version()));
|
||||
|
||||
// Note:this doesn't leak symbols because the mirror in the backtrace keeps the
|
||||
@ -1756,7 +1756,7 @@ class BacktraceIterator : public StackObj {
|
||||
|
||||
BacktraceElement next(Thread* thread) {
|
||||
BacktraceElement e (Handle(thread, _mirrors->obj_at(_index)),
|
||||
_methods->short_at(_index),
|
||||
_methods->ushort_at(_index),
|
||||
Backtrace::version_at(_bcis->int_at(_index)),
|
||||
Backtrace::bci_at(_bcis->int_at(_index)),
|
||||
_names->symbol_at(_index));
|
||||
@ -1968,7 +1968,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
|
||||
bool skip_throwableInit_check = false;
|
||||
bool skip_hidden = !ShowHiddenFrames;
|
||||
|
||||
for (frame fr = thread->last_frame(); max_depth != total_count;) {
|
||||
for (frame fr = thread->last_frame(); max_depth == 0 || max_depth != total_count;) {
|
||||
Method* method = NULL;
|
||||
int bci = 0;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -52,14 +52,14 @@ void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
|
||||
ProtectionDomainCacheEntry** p = bucket_addr(i);
|
||||
ProtectionDomainCacheEntry* entry = bucket(i);
|
||||
while (entry != NULL) {
|
||||
if (is_alive->do_object_b(entry->literal())) {
|
||||
if (is_alive->do_object_b(entry->object_no_keepalive())) {
|
||||
p = entry->next_addr();
|
||||
} else {
|
||||
LogTarget(Debug, protectiondomain) lt;
|
||||
if (lt.is_enabled()) {
|
||||
LogStream ls(lt);
|
||||
ls.print("protection domain unlinked: ");
|
||||
entry->literal()->print_value_on(&ls);
|
||||
entry->object_no_keepalive()->print_value_on(&ls);
|
||||
ls.cr();
|
||||
}
|
||||
*p = entry->next();
|
||||
@ -87,7 +87,7 @@ void ProtectionDomainCacheTable::print_on(outputStream* st) const {
|
||||
for (ProtectionDomainCacheEntry* probe = bucket(index);
|
||||
probe != NULL;
|
||||
probe = probe->next()) {
|
||||
st->print_cr("%4d: protection_domain: " PTR_FORMAT, index, p2i(probe->literal()));
|
||||
st->print_cr("%4d: protection_domain: " PTR_FORMAT, index, p2i(probe->object_no_keepalive()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -96,8 +96,27 @@ void ProtectionDomainCacheTable::verify() {
|
||||
verify_table<ProtectionDomainCacheEntry>("Protection Domain Table");
|
||||
}
|
||||
|
||||
oop ProtectionDomainCacheEntry::object() {
|
||||
return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(literal_addr());
|
||||
}
|
||||
|
||||
oop ProtectionDomainEntry::object() {
|
||||
return _pd_cache->object();
|
||||
}
|
||||
|
||||
// The object_no_keepalive() call peeks at the phantomly reachable oop without
|
||||
// keeping it alive. This is okay to do in the VM thread state if it is not
|
||||
// leaked out to become strongly reachable.
|
||||
oop ProtectionDomainCacheEntry::object_no_keepalive() {
|
||||
return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(literal_addr());
|
||||
}
|
||||
|
||||
oop ProtectionDomainEntry::object_no_keepalive() {
|
||||
return _pd_cache->object_no_keepalive();
|
||||
}
|
||||
|
||||
void ProtectionDomainCacheEntry::verify() {
|
||||
guarantee(oopDesc::is_oop(literal()), "must be an oop");
|
||||
guarantee(oopDesc::is_oop(object_no_keepalive()), "must be an oop");
|
||||
}
|
||||
|
||||
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(Handle protection_domain) {
|
||||
@ -113,7 +132,7 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(Handle protection_do
|
||||
|
||||
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
|
||||
for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
|
||||
if (e->protection_domain() == protection_domain()) {
|
||||
if (e->object_no_keepalive() == protection_domain()) {
|
||||
return e;
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -37,7 +37,8 @@
|
||||
class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
|
||||
friend class VMStructs;
|
||||
public:
|
||||
oop protection_domain() { return literal(); }
|
||||
oop object();
|
||||
oop object_no_keepalive();
|
||||
|
||||
ProtectionDomainCacheEntry* next() {
|
||||
return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
|
||||
@ -112,6 +113,8 @@ class ProtectionDomainEntry :public CHeapObj<mtClass> {
|
||||
}
|
||||
|
||||
ProtectionDomainEntry* next() { return _next; }
|
||||
oop protection_domain() { return _pd_cache->protection_domain(); }
|
||||
void set_next(ProtectionDomainEntry* entry) { _next = entry; }
|
||||
oop object();
|
||||
oop object_no_keepalive();
|
||||
};
|
||||
#endif // SHARE_VM_CLASSFILE_PROTECTIONDOMAINCACHE_HPP
|
||||
|
@ -2641,6 +2641,81 @@ static bool is_always_visible_class(oop mirror) {
|
||||
InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass())); // java.lang.invoke
|
||||
}
|
||||
|
||||
|
||||
// Return the Java mirror (java.lang.Class instance) for a single-character
|
||||
// descriptor. This result, when available, is the same as produced by the
|
||||
// heavier API point of the same name that takes a Symbol.
|
||||
oop SystemDictionary::find_java_mirror_for_type(char signature_char) {
|
||||
return java_lang_Class::primitive_mirror(char2type(signature_char));
|
||||
}
|
||||
|
||||
// Find or construct the Java mirror (java.lang.Class instance) for a
|
||||
// for the given field type signature, as interpreted relative to the
|
||||
// given class loader. Handles primitives, void, references, arrays,
|
||||
// and all other reflectable types, except method types.
|
||||
// N.B. Code in reflection should use this entry point.
|
||||
Handle SystemDictionary::find_java_mirror_for_type(Symbol* signature,
|
||||
Klass* accessing_klass,
|
||||
Handle class_loader,
|
||||
Handle protection_domain,
|
||||
SignatureStream::FailureMode failure_mode,
|
||||
TRAPS) {
|
||||
Handle empty;
|
||||
|
||||
assert(accessing_klass == NULL || (class_loader.is_null() && protection_domain.is_null()),
|
||||
"one or the other, or perhaps neither");
|
||||
|
||||
Symbol* type = signature;
|
||||
|
||||
// What we have here must be a valid field descriptor,
|
||||
// and all valid field descriptors are supported.
|
||||
// Produce the same java.lang.Class that reflection reports.
|
||||
if (type->utf8_length() == 1) {
|
||||
|
||||
// It's a primitive. (Void has a primitive mirror too.)
|
||||
char ch = (char) type->byte_at(0);
|
||||
assert(is_java_primitive(char2type(ch)) || ch == 'V', "");
|
||||
return Handle(THREAD, find_java_mirror_for_type(ch));
|
||||
|
||||
} else if (FieldType::is_obj(type) || FieldType::is_array(type)) {
|
||||
|
||||
// It's a reference type.
|
||||
if (accessing_klass != NULL) {
|
||||
class_loader = Handle(THREAD, accessing_klass->class_loader());
|
||||
protection_domain = Handle(THREAD, accessing_klass->protection_domain());
|
||||
}
|
||||
Klass* constant_type_klass;
|
||||
if (failure_mode == SignatureStream::ReturnNull) {
|
||||
constant_type_klass = resolve_or_null(type, class_loader, protection_domain,
|
||||
CHECK_(empty));
|
||||
} else {
|
||||
bool throw_error = (failure_mode == SignatureStream::NCDFError);
|
||||
constant_type_klass = resolve_or_fail(type, class_loader, protection_domain,
|
||||
throw_error, CHECK_(empty));
|
||||
}
|
||||
if (constant_type_klass == NULL) {
|
||||
return Handle(); // report failure this way
|
||||
}
|
||||
Handle mirror(THREAD, constant_type_klass->java_mirror());
|
||||
|
||||
// Check accessibility, emulating ConstantPool::verify_constant_pool_resolve.
|
||||
if (accessing_klass != NULL) {
|
||||
Klass* sel_klass = constant_type_klass;
|
||||
bool fold_type_to_class = true;
|
||||
LinkResolver::check_klass_accessability(accessing_klass, sel_klass,
|
||||
fold_type_to_class, CHECK_(empty));
|
||||
}
|
||||
|
||||
return mirror;
|
||||
|
||||
}
|
||||
|
||||
// Fall through to an error.
|
||||
assert(false, "unsupported mirror syntax");
|
||||
THROW_MSG_(vmSymbols::java_lang_InternalError(), "unsupported mirror syntax", empty);
|
||||
}
|
||||
|
||||
|
||||
// Ask Java code to find or construct a java.lang.invoke.MethodType for the given
|
||||
// signature, as interpreted relative to the given class loader.
|
||||
// Because of class loader constraints, all method handle usage must be
|
||||
@ -2695,15 +2770,13 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
|
||||
pts->obj_at_put(arg++, mirror);
|
||||
|
||||
// Check accessibility.
|
||||
if (ss.is_object() && accessing_klass != NULL) {
|
||||
if (!java_lang_Class::is_primitive(mirror) && accessing_klass != NULL) {
|
||||
Klass* sel_klass = java_lang_Class::as_Klass(mirror);
|
||||
mirror = NULL; // safety
|
||||
// Emulate ConstantPool::verify_constant_pool_resolve.
|
||||
if (sel_klass->is_objArray_klass())
|
||||
sel_klass = ObjArrayKlass::cast(sel_klass)->bottom_klass();
|
||||
if (sel_klass->is_instance_klass()) {
|
||||
LinkResolver::check_klass_accessability(accessing_klass, sel_klass, CHECK_(empty));
|
||||
}
|
||||
bool fold_type_to_class = true;
|
||||
LinkResolver::check_klass_accessability(accessing_klass, sel_klass,
|
||||
fold_type_to_class, CHECK_(empty));
|
||||
}
|
||||
}
|
||||
assert(arg == npts, "");
|
||||
@ -2806,9 +2879,60 @@ Handle SystemDictionary::link_method_handle_constant(Klass* caller,
|
||||
return Handle(THREAD, (oop) result.get_jobject());
|
||||
}
|
||||
|
||||
// Ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
|
||||
Handle SystemDictionary::link_dynamic_constant(Klass* caller,
|
||||
int condy_index,
|
||||
Handle bootstrap_specifier,
|
||||
Symbol* name,
|
||||
Symbol* type,
|
||||
TRAPS) {
|
||||
Handle empty;
|
||||
Handle bsm, info;
|
||||
if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
|
||||
bsm = bootstrap_specifier;
|
||||
} else {
|
||||
assert(bootstrap_specifier->is_objArray(), "");
|
||||
objArrayOop args = (objArrayOop) bootstrap_specifier();
|
||||
assert(args->length() == 2, "");
|
||||
bsm = Handle(THREAD, args->obj_at(0));
|
||||
info = Handle(THREAD, args->obj_at(1));
|
||||
}
|
||||
guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
|
||||
"caller must supply a valid BSM");
|
||||
|
||||
// This should not happen. JDK code should take care of that.
|
||||
if (caller == NULL) {
|
||||
THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad dynamic constant", empty);
|
||||
}
|
||||
|
||||
Handle constant_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
|
||||
|
||||
// Resolve the constant type in the context of the caller class
|
||||
Handle type_mirror = find_java_mirror_for_type(type, caller, SignatureStream::NCDFError,
|
||||
CHECK_(empty));
|
||||
|
||||
// call java.lang.invoke.MethodHandleNatives::linkConstantDyanmic(caller, condy_index, bsm, type, info)
|
||||
JavaCallArguments args;
|
||||
args.push_oop(Handle(THREAD, caller->java_mirror()));
|
||||
args.push_int(condy_index);
|
||||
args.push_oop(bsm);
|
||||
args.push_oop(constant_name);
|
||||
args.push_oop(type_mirror);
|
||||
args.push_oop(info);
|
||||
JavaValue result(T_OBJECT);
|
||||
JavaCalls::call_static(&result,
|
||||
SystemDictionary::MethodHandleNatives_klass(),
|
||||
vmSymbols::linkDynamicConstant_name(),
|
||||
vmSymbols::linkDynamicConstant_signature(),
|
||||
&args, CHECK_(empty));
|
||||
|
||||
return Handle(THREAD, (oop) result.get_jobject());
|
||||
}
|
||||
|
||||
// Ask Java code to find or construct a java.lang.invoke.CallSite for the given
|
||||
// name and signature, as interpreted relative to the given class loader.
|
||||
methodHandle SystemDictionary::find_dynamic_call_site_invoker(Klass* caller,
|
||||
int indy_index,
|
||||
Handle bootstrap_specifier,
|
||||
Symbol* name,
|
||||
Symbol* type,
|
||||
@ -2820,17 +2944,10 @@ methodHandle SystemDictionary::find_dynamic_call_site_invoker(Klass* caller,
|
||||
if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
|
||||
bsm = bootstrap_specifier;
|
||||
} else {
|
||||
assert(bootstrap_specifier->is_objArray(), "");
|
||||
objArrayHandle args(THREAD, (objArrayOop) bootstrap_specifier());
|
||||
int len = args->length();
|
||||
assert(len >= 1, "");
|
||||
bsm = Handle(THREAD, args->obj_at(0));
|
||||
if (len > 1) {
|
||||
objArrayOop args1 = oopFactory::new_objArray(SystemDictionary::Object_klass(), len-1, CHECK_(empty));
|
||||
for (int i = 1; i < len; i++)
|
||||
args1->obj_at_put(i-1, args->obj_at(i));
|
||||
info = Handle(THREAD, args1);
|
||||
}
|
||||
objArrayOop args = (objArrayOop) bootstrap_specifier();
|
||||
assert(args->length() == 2, "");
|
||||
bsm = Handle(THREAD, args->obj_at(0));
|
||||
info = Handle(THREAD, args->obj_at(1));
|
||||
}
|
||||
guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
|
||||
"caller must supply a valid BSM");
|
||||
@ -2846,9 +2963,10 @@ methodHandle SystemDictionary::find_dynamic_call_site_invoker(Klass* caller,
|
||||
objArrayHandle appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK_(empty));
|
||||
assert(appendix_box->obj_at(0) == NULL, "");
|
||||
|
||||
// call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, bsm, name, mtype, info, &appendix)
|
||||
// call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, indy_index, bsm, name, mtype, info, &appendix)
|
||||
JavaCallArguments args;
|
||||
args.push_oop(Handle(THREAD, caller->java_mirror()));
|
||||
args.push_int(indy_index);
|
||||
args.push_oop(bsm);
|
||||
args.push_oop(method_name);
|
||||
args.push_oop(method_type);
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/reflectionUtils.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
#include "utilities/hashtable.inline.hpp"
|
||||
|
||||
@ -527,6 +528,28 @@ public:
|
||||
static methodHandle find_method_handle_intrinsic(vmIntrinsics::ID iid,
|
||||
Symbol* signature,
|
||||
TRAPS);
|
||||
|
||||
// compute java_mirror (java.lang.Class instance) for a type ("I", "[[B", "LFoo;", etc.)
|
||||
// Either the accessing_klass or the CL/PD can be non-null, but not both.
|
||||
static Handle find_java_mirror_for_type(Symbol* signature,
|
||||
Klass* accessing_klass,
|
||||
Handle class_loader,
|
||||
Handle protection_domain,
|
||||
SignatureStream::FailureMode failure_mode,
|
||||
TRAPS);
|
||||
static Handle find_java_mirror_for_type(Symbol* signature,
|
||||
Klass* accessing_klass,
|
||||
SignatureStream::FailureMode failure_mode,
|
||||
TRAPS) {
|
||||
// callee will fill in CL/PD from AK, if they are needed
|
||||
return find_java_mirror_for_type(signature, accessing_klass, Handle(), Handle(),
|
||||
failure_mode, THREAD);
|
||||
}
|
||||
|
||||
|
||||
// fast short-cut for the one-character case:
|
||||
static oop find_java_mirror_for_type(char signature_char);
|
||||
|
||||
// find a java.lang.invoke.MethodType object for a given signature
|
||||
// (asks Java to compute it if necessary, except in a compiler thread)
|
||||
static Handle find_method_handle_type(Symbol* signature,
|
||||
@ -546,8 +569,17 @@ public:
|
||||
Symbol* signature,
|
||||
TRAPS);
|
||||
|
||||
// ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
|
||||
static Handle link_dynamic_constant(Klass* caller,
|
||||
int condy_index,
|
||||
Handle bootstrap_specifier,
|
||||
Symbol* name,
|
||||
Symbol* type,
|
||||
TRAPS);
|
||||
|
||||
// ask Java to create a dynamic call site, while linking an invokedynamic op
|
||||
static methodHandle find_dynamic_call_site_invoker(Klass* caller,
|
||||
int indy_index,
|
||||
Handle bootstrap_method,
|
||||
Symbol* name,
|
||||
Symbol* type,
|
||||
|
@ -2054,19 +2054,21 @@ void ClassVerifier::verify_ldc(
|
||||
const constantPoolHandle& cp, u2 bci, TRAPS) {
|
||||
verify_cp_index(bci, cp, index, CHECK_VERIFY(this));
|
||||
constantTag tag = cp->tag_at(index);
|
||||
unsigned int types;
|
||||
unsigned int types = 0;
|
||||
if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
|
||||
if (!tag.is_unresolved_klass()) {
|
||||
types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
|
||||
| (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class)
|
||||
| (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
|
||||
| (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType)
|
||||
| (1 << JVM_CONSTANT_Dynamic);
|
||||
// Note: The class file parser already verified the legality of
|
||||
// MethodHandle and MethodType constants.
|
||||
verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
|
||||
}
|
||||
} else {
|
||||
assert(opcode == Bytecodes::_ldc2_w, "must be ldc2_w");
|
||||
types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long);
|
||||
types = (1 << JVM_CONSTANT_Double) | (1 << JVM_CONSTANT_Long)
|
||||
| (1 << JVM_CONSTANT_Dynamic);
|
||||
verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
|
||||
}
|
||||
if (tag.is_string() && cp->is_pseudo_string_at(index)) {
|
||||
@ -2101,6 +2103,30 @@ void ClassVerifier::verify_ldc(
|
||||
current_frame->push_stack(
|
||||
VerificationType::reference_type(
|
||||
vmSymbols::java_lang_invoke_MethodType()), CHECK_VERIFY(this));
|
||||
} else if (tag.is_dynamic_constant()) {
|
||||
Symbol* constant_type = cp->uncached_signature_ref_at(index);
|
||||
if (!SignatureVerifier::is_valid_type_signature(constant_type)) {
|
||||
class_format_error(
|
||||
"Invalid type for dynamic constant in class %s referenced "
|
||||
"from constant pool index %d", _klass->external_name(), index);
|
||||
return;
|
||||
}
|
||||
assert(sizeof(VerificationType) == sizeof(uintptr_t),
|
||||
"buffer type must match VerificationType size");
|
||||
uintptr_t constant_type_buffer[2];
|
||||
VerificationType* v_constant_type = (VerificationType*)constant_type_buffer;
|
||||
SignatureStream sig_stream(constant_type, false);
|
||||
int n = change_sig_to_verificationType(
|
||||
&sig_stream, v_constant_type, CHECK_VERIFY(this));
|
||||
int opcode_n = (opcode == Bytecodes::_ldc2_w ? 2 : 1);
|
||||
if (n != opcode_n) {
|
||||
// wrong kind of ldc; reverify against updated type mask
|
||||
types &= ~(1 << JVM_CONSTANT_Dynamic);
|
||||
verify_cp_type(bci, index, cp, types, CHECK_VERIFY(this));
|
||||
}
|
||||
for (int i = 0; i < n; i++) {
|
||||
current_frame->push_stack(v_constant_type[i], CHECK_VERIFY(this));
|
||||
}
|
||||
} else {
|
||||
/* Unreachable? verify_cp_type has already validated the cp type. */
|
||||
verify_error(
|
||||
@ -2665,7 +2691,7 @@ void ClassVerifier::verify_invoke_instructions(
|
||||
// Make sure the constant pool item is the right type
|
||||
u2 index = bcs->get_index_u2();
|
||||
Bytecodes::Code opcode = bcs->raw_code();
|
||||
unsigned int types;
|
||||
unsigned int types = 0;
|
||||
switch (opcode) {
|
||||
case Bytecodes::_invokeinterface:
|
||||
types = 1 << JVM_CONSTANT_InterfaceMethodref;
|
||||
|
@ -40,7 +40,8 @@ class Verifier : AllStatic {
|
||||
STRICTER_ACCESS_CTRL_CHECK_VERSION = 49,
|
||||
STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50,
|
||||
INVOKEDYNAMIC_MAJOR_VERSION = 51,
|
||||
NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52
|
||||
NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52,
|
||||
DYNAMICCONSTANT_MAJOR_VERSION = 55
|
||||
};
|
||||
typedef enum { ThrowException, NoException } Mode;
|
||||
|
||||
|
@ -98,6 +98,14 @@ void vmSymbols::initialize(TRAPS) {
|
||||
_type_signatures[T_BOOLEAN] = bool_signature();
|
||||
_type_signatures[T_VOID] = void_signature();
|
||||
// no single signatures for T_OBJECT or T_ARRAY
|
||||
#ifdef ASSERT
|
||||
for (int i = (int)T_BOOLEAN; i < (int)T_VOID+1; i++) {
|
||||
Symbol* s = _type_signatures[i];
|
||||
if (s == NULL) continue;
|
||||
BasicType st = signature_type(s);
|
||||
assert(st == i, "");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -202,9 +210,11 @@ void vmSymbols::serialize(SerializeClosure* soc) {
|
||||
|
||||
BasicType vmSymbols::signature_type(const Symbol* s) {
|
||||
assert(s != NULL, "checking");
|
||||
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
|
||||
if (s == _type_signatures[i]) {
|
||||
return (BasicType)i;
|
||||
if (s->utf8_length() == 1) {
|
||||
BasicType result = char2type(s->byte_at(0));
|
||||
if (is_java_primitive(result) || result == T_VOID) {
|
||||
assert(s == _type_signatures[result], "");
|
||||
return result;
|
||||
}
|
||||
}
|
||||
return T_OBJECT;
|
||||
|
@ -307,8 +307,10 @@
|
||||
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
|
||||
template(linkMethod_name, "linkMethod") \
|
||||
template(linkMethod_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
|
||||
template(linkDynamicConstant_name, "linkDynamicConstant") \
|
||||
template(linkDynamicConstant_signature, "(Ljava/lang/Object;ILjava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
|
||||
template(linkCallSite_name, "linkCallSite") \
|
||||
template(linkCallSite_signature, "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
|
||||
template(linkCallSite_signature, "(Ljava/lang/Object;ILjava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \
|
||||
template(setTargetNormal_name, "setTargetNormal") \
|
||||
template(setTargetVolatile_name, "setTargetVolatile") \
|
||||
template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
|
||||
|
@ -224,7 +224,7 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
|
||||
assert(bytecode == Bytecodes::_invokeinterface, "");
|
||||
int itable_index = call_info->itable_index();
|
||||
entry = VtableStubs::find_itable_stub(itable_index);
|
||||
if (entry == false) {
|
||||
if (entry == NULL) {
|
||||
return false;
|
||||
}
|
||||
#ifdef ASSERT
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -172,9 +172,8 @@ void ObjectLookup::maybe_resort() {
|
||||
}
|
||||
|
||||
int ObjectLookup::sort_by_address(oop a, oop b) {
|
||||
if (b > a) return 1;
|
||||
if (a > b) return -1;
|
||||
return 0;
|
||||
// oopDesc::compare returns the opposite of what this function returned
|
||||
return -(oopDesc::compare(a, b));
|
||||
}
|
||||
|
||||
int ObjectLookup::sort_by_address(ObjectEntry* a, ObjectEntry* b) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -246,11 +246,11 @@ class CMSMarkStack: public CHeapObj<mtGC> {
|
||||
|
||||
// Compute the least valued stack element.
|
||||
oop least_value(HeapWord* low) {
|
||||
oop least = (oop)low;
|
||||
for (size_t i = 0; i < _index; i++) {
|
||||
least = MIN2(least, _base[i]);
|
||||
}
|
||||
return least;
|
||||
HeapWord* least = low;
|
||||
for (size_t i = 0; i < _index; i++) {
|
||||
least = MIN2(least, (HeapWord*)_base[i]);
|
||||
}
|
||||
return (oop)least;
|
||||
}
|
||||
|
||||
// Exposed here to allow stack expansion in || case.
|
||||
|
@ -54,7 +54,7 @@ public:
|
||||
// pre-marking object graph.
|
||||
static void enqueue(oop pre_val);
|
||||
|
||||
static void enqueue_if_weak(DecoratorSet decorators, oop value);
|
||||
static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
|
||||
|
||||
template <class T> void write_ref_array_pre_work(T* dst, int count);
|
||||
virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
|
||||
|
@ -60,12 +60,17 @@ void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
|
||||
_byte_map[card_index] = val;
|
||||
}
|
||||
|
||||
inline void G1SATBCardTableModRefBS::enqueue_if_weak(DecoratorSet decorators, oop value) {
|
||||
inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
|
||||
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
|
||||
// Archive roots need to be enqueued since they add subgraphs to the
|
||||
// Java heap that were not there at the snapshot when marking started.
|
||||
// Weak and phantom references also need enqueueing for similar reasons.
|
||||
const bool in_archive_root = (decorators & IN_ARCHIVE_ROOT) != 0;
|
||||
const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
|
||||
const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
|
||||
const bool needs_enqueue = in_archive_root || (!peek && !on_strong_oop_ref);
|
||||
|
||||
if (!peek && !on_strong_oop_ref && value != NULL) {
|
||||
if (needs_enqueue && value != NULL) {
|
||||
enqueue(value);
|
||||
}
|
||||
}
|
||||
@ -75,7 +80,7 @@ template <typename T>
|
||||
inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
|
||||
oop_load_not_in_heap(T* addr) {
|
||||
oop value = ModRef::oop_load_not_in_heap(addr);
|
||||
enqueue_if_weak(decorators, value);
|
||||
enqueue_if_weak_or_archive(decorators, value);
|
||||
return value;
|
||||
}
|
||||
|
||||
@ -84,7 +89,7 @@ template <typename T>
|
||||
inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
|
||||
oop_load_in_heap(T* addr) {
|
||||
oop value = ModRef::oop_load_in_heap(addr);
|
||||
enqueue_if_weak(decorators, value);
|
||||
enqueue_if_weak_or_archive(decorators, value);
|
||||
return value;
|
||||
}
|
||||
|
||||
@ -92,7 +97,7 @@ template <DecoratorSet decorators, typename BarrierSetT>
|
||||
inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
|
||||
oop_load_in_heap_at(oop base, ptrdiff_t offset) {
|
||||
oop value = ModRef::oop_load_in_heap_at(base, offset);
|
||||
enqueue_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
|
||||
enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
|
||||
return value;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -131,26 +131,28 @@ ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
|
||||
}
|
||||
|
||||
size_t
|
||||
ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const
|
||||
ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
|
||||
{
|
||||
HeapWord* last_beg = cm->last_query_begin();
|
||||
oop last_obj = cm->last_query_object();
|
||||
HeapWord* last_obj = (HeapWord*)cm->last_query_object();
|
||||
HeapWord* end_obj = (HeapWord*)end_oop;
|
||||
|
||||
size_t last_ret = cm->last_query_return();
|
||||
if (end_obj > last_obj) {
|
||||
last_ret = last_ret + live_words_in_range_helper((HeapWord*)last_obj, end_obj);
|
||||
last_ret = last_ret + live_words_in_range_helper(last_obj, end_oop);
|
||||
last_obj = end_obj;
|
||||
} else if (end_obj < last_obj) {
|
||||
// The cached value is for an object that is to the left (lower address) of the current
|
||||
// end_obj. Calculate back from that cached value.
|
||||
if (pointer_delta((HeapWord*)end_obj, (HeapWord*)beg_addr) > pointer_delta((HeapWord*)last_obj, (HeapWord*)end_obj)) {
|
||||
last_ret = last_ret - live_words_in_range_helper((HeapWord*)end_obj, last_obj);
|
||||
if (pointer_delta(end_obj, beg_addr) > pointer_delta(last_obj, end_obj)) {
|
||||
last_ret = last_ret - live_words_in_range_helper(end_obj, (oop)last_obj);
|
||||
} else {
|
||||
last_ret = live_words_in_range_helper(beg_addr, end_obj);
|
||||
last_ret = live_words_in_range_helper(beg_addr, end_oop);
|
||||
}
|
||||
last_obj = end_obj;
|
||||
}
|
||||
|
||||
update_live_words_in_range_cache(cm, last_beg, last_obj, last_ret);
|
||||
update_live_words_in_range_cache(cm, last_beg, (oop)last_obj, last_ret);
|
||||
return last_ret;
|
||||
}
|
||||
|
||||
|
709
src/hotspot/share/gc/shared/oopStorage.cpp
Normal file
709
src/hotspot/share/gc/shared/oopStorage.cpp
Normal file
@ -0,0 +1,709 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/oopStorage.inline.hpp"
|
||||
#include "gc/shared/oopStorageParState.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/orderAccess.inline.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/count_trailing_zeros.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
OopStorage::BlockEntry::BlockEntry() : _prev(NULL), _next(NULL) {}
|
||||
|
||||
OopStorage::BlockEntry::~BlockEntry() {
|
||||
assert(_prev == NULL, "deleting attached block");
|
||||
assert(_next == NULL, "deleting attached block");
|
||||
}
|
||||
|
||||
OopStorage::BlockList::BlockList(const BlockEntry& (*get_entry)(const Block& block)) :
|
||||
_head(NULL), _tail(NULL), _get_entry(get_entry)
|
||||
{}
|
||||
|
||||
OopStorage::BlockList::~BlockList() {
|
||||
// ~OopStorage() empties its lists before destroying them.
|
||||
assert(_head == NULL, "deleting non-empty block list");
|
||||
assert(_tail == NULL, "deleting non-empty block list");
|
||||
}
|
||||
|
||||
void OopStorage::BlockList::push_front(const Block& block) {
|
||||
const Block* old = _head;
|
||||
if (old == NULL) {
|
||||
assert(_tail == NULL, "invariant");
|
||||
_head = _tail = █
|
||||
} else {
|
||||
_get_entry(block)._next = old;
|
||||
_get_entry(*old)._prev = █
|
||||
_head = █
|
||||
}
|
||||
}
|
||||
|
||||
void OopStorage::BlockList::push_back(const Block& block) {
|
||||
const Block* old = _tail;
|
||||
if (old == NULL) {
|
||||
assert(_head == NULL, "invariant");
|
||||
_head = _tail = █
|
||||
} else {
|
||||
_get_entry(*old)._next = █
|
||||
_get_entry(block)._prev = old;
|
||||
_tail = █
|
||||
}
|
||||
}
|
||||
|
||||
// Remove block from this list, clearing its links.  block must currently be
// on the list.  Handles the four neighbor combinations explicitly.
void OopStorage::BlockList::unlink(const Block& block) {
  const BlockEntry& block_entry = _get_entry(block);
  const Block* prev_blk = block_entry._prev;
  const Block* next_blk = block_entry._next;
  // Detach block's own links first; neighbors are patched below.
  block_entry._prev = NULL;
  block_entry._next = NULL;
  if ((prev_blk == NULL) && (next_blk == NULL)) {
    // Only element: list becomes empty.
    assert(_head == &block, "invariant");
    assert(_tail == &block, "invariant");
    _head = _tail = NULL;
  } else if (prev_blk == NULL) {
    // Removing the head.
    assert(_head == &block, "invariant");
    _get_entry(*next_blk)._prev = NULL;
    _head = next_blk;
  } else if (next_blk == NULL) {
    // Removing the tail.
    assert(_tail == &block, "invariant");
    _get_entry(*prev_blk)._next = NULL;
    _tail = prev_blk;
  } else {
    // Interior element: link the neighbors to each other.
    _get_entry(*next_blk)._prev = prev_blk;
    _get_entry(*prev_blk)._next = next_blk;
  }
}
|
||||
|
||||
// Blocks start with an array of BitsPerWord oop entries. That array
|
||||
// is divided into conceptual BytesPerWord sections of BitsPerByte
|
||||
// entries. Blocks are allocated aligned on section boundaries, for
|
||||
// the convenience of mapping from an entry to the containing block;
|
||||
// see block_for_ptr(). Aligning on section boundary rather than on
|
||||
// the full _data wastes a lot less space, but makes for a bit more
|
||||
// work in block_for_ptr().
|
||||
|
||||
// Entries per section: one per bit of a byte of the bitmask word.
const unsigned section_size = BitsPerByte;
// Sections per block: one per byte of the bitmask word.  The ctor asserts
// section_size * section_count == ARRAY_SIZE(_data) (== BitsPerWord).
const unsigned section_count = BytesPerWord;
// Blocks are allocated aligned to the byte size of one section of entries.
const unsigned block_alignment = sizeof(oop) * section_size;
|
||||
|
||||
// VS2013 warns (C4351) that elements of _data will be *correctly* default
|
||||
// initialized, unlike earlier versions that *incorrectly* did not do so.
|
||||
#ifdef _WINDOWS
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable: 4351)
|
||||
#endif // _WINDOWS
|
||||
// Construct a block at an aligned position within the raw allocation.
// owner  - the storage object this block belongs to (used by block_for_ptr).
// memory - the unaligned start of the raw allocation, kept for freeing.
OopStorage::Block::Block(const OopStorage* owner, void* memory) :
  _data(),
  _allocated_bitmask(0),
  _owner(owner),
  _memory(memory),
  _active_entry(),
  _allocate_entry()
{
  STATIC_ASSERT(_data_pos == 0);
  STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
  assert(offset_of(Block, _data) == _data_pos, "invariant");
  assert(owner != NULL, "NULL owner");
  assert(is_aligned(this, block_alignment), "misaligned block");
}
|
||||
#ifdef _WINDOWS
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
|
||||
OopStorage::Block::~Block() {
  // Clear fields used by block_for_ptr and entry validation, which
  // might help catch bugs. Volatile to prevent dead-store elimination.
  const_cast<uintx volatile&>(_allocated_bitmask) = 0;
  const_cast<OopStorage* volatile&>(_owner) = NULL;
}
|
||||
|
||||
// List-entry accessors passed to BlockList to select which embedded link
// pair a given list threads through.

// Links used by the owner's _active_list.
const OopStorage::BlockEntry& OopStorage::Block::get_active_entry(const Block& block) {
  return block._active_entry;
}

// Links used by the owner's _allocate_list.
const OopStorage::BlockEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
  return block._allocate_entry;
}
|
||||
|
||||
// Bytes to request from the C heap for one block, oversized so that an
// aligned Block always fits within the raw allocation (see new_block).
size_t OopStorage::Block::allocation_size() {
  // _data must be first member, so aligning Block aligns _data.
  STATIC_ASSERT(_data_pos == 0);
  // align_up can move the start by at most (block_alignment - sizeof(void*))
  // because the C-heap allocation is at least pointer-aligned.
  return sizeof(Block) + block_alignment - sizeof(void*);
}

// log2 of the block alignment; useful for sizing aligned allocators.
size_t OopStorage::Block::allocation_alignment_shift() {
  return exact_log2(block_alignment);
}
|
||||
|
||||
// A full bitmask has every bit set (all entries in use); an empty one has
// none set.
inline bool is_full_bitmask(uintx bitmask) { return ~bitmask == 0; }
inline bool is_empty_bitmask(uintx bitmask) { return bitmask == 0; }

// True if every entry of this block is allocated.  Racy unless externally
// synchronized, since release may clear bits concurrently.
bool OopStorage::Block::is_full() const {
  return is_full_bitmask(allocated_bitmask());
}

// True if no entry of this block is allocated.  Same raciness caveat.
bool OopStorage::Block::is_empty() const {
  return is_empty_bitmask(allocated_bitmask());
}
|
||||
|
||||
// Single-bit mask identifying ptr's position in the allocated bitmask.
// precondition: ptr is an entry of this block.
uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
  return bitmask_for_index(get_index(ptr));
}

// Atomically install new_value in _allocated_bitmask if it still equals
// compare_value; returns the value observed (== compare_value on success).
uintx OopStorage::Block::cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value) {
  return Atomic::cmpxchg(new_value, &_allocated_bitmask, compare_value);
}

// True if ptr points into this block's entry array (allocated or not).
bool OopStorage::Block::contains(const oop* ptr) const {
  const oop* base = get_pointer(0);
  return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
}

// Index of ptr within this block's entry array.
// precondition: contains(ptr).
unsigned OopStorage::Block::get_index(const oop* ptr) const {
  assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
  return static_cast<unsigned>(ptr - get_pointer(0));
}
|
||||
|
||||
// Claim one unused entry of this block and return its location.
// precondition: the block is not full.
oop* OopStorage::Block::allocate() {
  // Use CAS loop because release may change bitmask outside of lock.
  uintx allocated = allocated_bitmask();
  while (true) {
    assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
    // Lowest clear bit of the bitmask == first free entry.
    unsigned index = count_trailing_zeros(~allocated);
    uintx new_value = allocated | bitmask_for_index(index);
    uintx fetched = cmpxchg_allocated_bitmask(new_value, allocated);
    if (fetched == allocated) {
      return get_pointer(index); // CAS succeeded; return entry for index.
    }
    allocated = fetched; // CAS failed; retry with latest value.
  }
}
|
||||
|
||||
// Allocate and construct a block for owner.  Returns NULL if the C-heap
// allocation fails.  The Block is placement-constructed at the first
// block_alignment boundary inside the (oversized) raw allocation.
OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
  // _data must be first member: aligning block => aligning _data.
  STATIC_ASSERT(_data_pos == 0);
  size_t size_needed = allocation_size();
  void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
  if (memory == NULL) {
    return NULL;
  }
  void* block_mem = align_up(memory, block_alignment);
  assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
         "allocated insufficient space for aligned block");
  return ::new (block_mem) Block(owner, memory);
}
|
||||
|
||||
// Destroy block and free its raw allocation.  _memory is saved first
// because the destructor runs on the object that holds it.
void OopStorage::Block::delete_block(const Block& block) {
  void* memory = block._memory;
  block.Block::~Block();
  FREE_C_HEAP_ARRAY(char, memory);
}
|
||||
|
||||
// Find the block of owner's that contains ptr, by probing each possible
// section-aligned block start for the owner pointer at the expected offset.
// This can return a false positive if ptr is not contained by some
// block. For some uses, it is a precondition that ptr is valid,
// e.g. contained in some block in owner's _active_list. Other uses
// require additional validation of the result.
OopStorage::Block*
OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
  // SafeFetchN lets us read candidate memory that may not be a Block.
  assert(CanUseSafeFetchN(), "precondition");
  STATIC_ASSERT(_data_pos == 0);
  // Const-ness of ptr is not related to const-ness of containing block.
  // Blocks are allocated section-aligned, so get the containing section.
  oop* section_start = align_down(const_cast<oop*>(ptr), block_alignment);
  // Start with a guess that the containing section is the last section,
  // so the block starts section_count-1 sections earlier.
  oop* section = section_start - (section_size * (section_count - 1));
  // Walk up through the potential block start positions, looking for
  // the owner in the expected location. If we're below the actual block
  // start position, the value at the owner position will be some oop
  // (possibly NULL), which can never match the owner.
  intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
  for (unsigned i = 0; i < section_count; ++i, section += section_size) {
    Block* candidate = reinterpret_cast<Block*>(section);
    intptr_t* candidate_owner_addr
      = reinterpret_cast<intptr_t*>(&candidate->_owner);
    if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
      return candidate;
    }
  }
  return NULL;
}
|
||||
|
||||
bool OopStorage::is_valid_block_locked_or_safepoint(const Block* check_block) const {
|
||||
assert_locked_or_safepoint(_allocate_mutex);
|
||||
// For now, simple linear search. Do something more clever if this
|
||||
// is a performance bottleneck, particularly for allocation_status.
|
||||
for (const Block* block = _active_list.chead();
|
||||
block != NULL;
|
||||
block = _active_list.next(*block)) {
|
||||
if (check_block == block) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void OopStorage::assert_at_safepoint() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// Allocation
|
||||
//
|
||||
// Allocation involves the _allocate_list, which contains a subset of the
|
||||
// blocks owned by a storage object. This is a doubly-linked list, linked
|
||||
// through dedicated fields in the blocks. Full blocks are removed from this
|
||||
// list, though they are still present in the _active_list. Empty blocks are
|
||||
// kept at the end of the _allocate_list, to make it easy for empty block
|
||||
// deletion to find them.
|
||||
//
|
||||
// allocate(), release(), and delete_empty_blocks_concurrent() all lock the
|
||||
// _allocate_mutex while performing any list modifications.
|
||||
//
|
||||
// allocate() and release() update a block's _allocated_bitmask using CAS
|
||||
// loops. This prevents loss of updates even though release() may perform
|
||||
// some updates without any locking.
|
||||
//
|
||||
// allocate() obtains the entry from the first block in the _allocate_list,
|
||||
// and updates that block's _allocated_bitmask to indicate the entry is in
|
||||
// use. If this makes the block full (all entries in use), the block is
|
||||
// removed from the _allocate_list so it won't be considered by future
|
||||
// allocations until some entries in it are released.
|
||||
//
|
||||
// release() looks up the block for the entry without locking. Once the block
|
||||
// has been determined, its _allocated_bitmask needs to be updated, and its
|
||||
// position in the _allocate_list may need to be updated. There are two
|
||||
// cases:
|
||||
//
|
||||
// (a) If the block is neither full nor would become empty with the release of
|
||||
// the entry, only its _allocated_bitmask needs to be updated. But if the CAS
|
||||
// update fails, the applicable case may change for the retry.
|
||||
//
|
||||
// (b) Otherwise, the _allocate_list will also need to be modified. This
|
||||
// requires locking the _allocate_mutex, and then attempting to CAS the
|
||||
// _allocated_bitmask. If the CAS fails, the applicable case may change for
|
||||
// the retry. If the CAS succeeds, then update the _allocate_list according
|
||||
// to the state changes. If the block changed from full to not full, then
|
||||
// it needs to be added to the _allocate_list, for use in future allocations.
|
||||
// If the block changed from not empty to empty, then it is moved to the end
|
||||
// of the _allocate_list, for ease of empty block deletion processing.
|
||||
|
||||
// Allocate an entry, creating and registering a new block if no block has a
// free entry.  Returns NULL only if new-block allocation fails and no other
// thread made a block available meanwhile.  See the protocol comment above.
oop* OopStorage::allocate() {
  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
  Block* block = _allocate_list.head();
  if (block == NULL) {
    // No available blocks; make a new one, and add to storage.
    {
      // Drop the mutex during block allocation; other threads may allocate
      // or release entries (and add blocks) in this window.
      MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
      block = Block::new_block(this);
    }
    if (block != NULL) {
      // Add new block to storage.
      log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));

      // Add to end of _allocate_list. The mutex release allowed
      // other threads to add blocks to the _allocate_list. We prefer
      // to allocate from non-empty blocks, to allow empty blocks to
      // be deleted.
      _allocate_list.push_back(*block);
      ++_empty_block_count;
      // Add to front of _active_list, and then record as the head
      // block, for concurrent iteration protocol.
      _active_list.push_front(*block);
      ++_block_count;
      // Ensure all setup of block is complete before making it visible.
      OrderAccess::release_store(&_active_head, block);
    } else {
      log_info(oopstorage, blocks)("%s: failed new block allocation", name());
    }
    // Re-read the head: it may be our new block, or one added by another
    // thread while the mutex was released.
    block = _allocate_list.head();
    if (block == NULL) {
      // Failed to make new block, and no other thread made a block
      // available while the mutex was released, so return failure.
      return NULL;
    }
  }
  // Allocate from first block.
  assert(block != NULL, "invariant");
  assert(!block->is_full(), "invariant");
  if (block->is_empty()) {
    // Transitioning from empty to not empty.
    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
    --_empty_block_count;
  }
  oop* result = block->allocate();
  assert(result != NULL, "allocation failed");
  assert(!block->is_empty(), "postcondition");
  Atomic::inc(&_allocation_count); // release updates outside lock.
  if (block->is_full()) {
    // Transitioning from not full to full.
    // Remove full blocks from consideration by future allocates.
    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
    _allocate_list.unlink(*block);
  }
  log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
  return result;
}
|
||||
|
||||
// Locate the block containing ptr, or NULL.  May return a false positive
// for a ptr not owned by this storage; see Block::block_for_ptr.
OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
  assert(ptr != NULL, "precondition");
  return Block::block_for_ptr(this, ptr);
}
|
||||
|
||||
// Clear the entries named by releasing in block's allocated bitmask, taking
// _allocate_mutex only when the block's list position must change (full ->
// not-full, or not-empty -> empty).  See the protocol comment above.
// precondition: every bit of releasing is set in the allocated bitmask.
void OopStorage::release_from_block(Block& block, uintx releasing) {
  assert(releasing != 0, "invariant");
  uintx allocated = block.allocated_bitmask();
  while (true) {
    assert(releasing == (allocated & releasing), "invariant");
    // XOR clears exactly the releasing bits (they are set, per precondition).
    uintx new_value = allocated ^ releasing;
    // CAS new_value into block's allocated bitmask, retrying with
    // updated allocated bitmask until the CAS succeeds.
    uintx fetched;
    if (!is_full_bitmask(allocated) && !is_empty_bitmask(new_value)) {
      // Fast path: no list transition, so no lock needed.
      fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
      if (fetched == allocated) return;
    } else {
      // Need special handling if transitioning from full to not full,
      // or from not empty to empty. For those cases, must hold the
      // _allocation_mutex when updating the allocated bitmask, to
      // ensure the associated list manipulations will be consistent
      // with the allocation bitmask that is visible to other threads
      // in allocate() or deleting empty blocks.
      MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
      fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
      if (fetched == allocated) {
        // CAS succeeded; handle special cases, which might no longer apply.
        if (is_full_bitmask(allocated)) {
          // Transitioning from full to not-full; add to _allocate_list.
          log_debug(oopstorage, blocks)("%s: block not full " PTR_FORMAT, name(), p2i(&block));
          _allocate_list.push_front(block);
          assert(!block.is_full(), "invariant"); // Still not full.
        }
        if (is_empty_bitmask(new_value)) {
          // Transitioning from not-empty to empty; move to end of
          // _allocate_list, to make it a deletion candidate.
          log_debug(oopstorage, blocks)("%s: block empty " PTR_FORMAT, name(), p2i(&block));
          _allocate_list.unlink(block);
          _allocate_list.push_back(block);
          ++_empty_block_count;
          assert(block.is_empty(), "invariant"); // Still empty.
        }
        return; // Successful CAS and transitions handled.
      }
    }
    // CAS failed; retry with latest value.
    allocated = fetched;
  }
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void OopStorage::check_release(const Block* block, const oop* ptr) const {
|
||||
switch (allocation_status_validating_block(block, ptr)) {
|
||||
case INVALID_ENTRY:
|
||||
fatal("Releasing invalid entry: " PTR_FORMAT, p2i(ptr));
|
||||
break;
|
||||
|
||||
case UNALLOCATED_ENTRY:
|
||||
fatal("Releasing unallocated entry: " PTR_FORMAT, p2i(ptr));
|
||||
break;
|
||||
|
||||
case ALLOCATED_ENTRY:
|
||||
assert(block->contains(ptr), "invariant");
|
||||
break;
|
||||
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
// Assert the release() preconditions: entry is non-NULL and its value has
// already been cleared by the caller.
inline void check_release_entry(const oop* entry) {
  assert(entry != NULL, "Releasing NULL");
  assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
}
|
||||
|
||||
// Return a single entry to the storage.  May lock _allocate_mutex (via
// release_from_block) if the block's list position must change.
// precondition: ptr is a valid allocated entry and *ptr == NULL.
void OopStorage::release(const oop* ptr) {
  check_release_entry(ptr);
  Block* block = find_block_or_null(ptr);
  check_release(block, ptr); // Debug builds validate block and ptr.
  log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
  release_from_block(*block, block->bitmask_for_entry(ptr));
  Atomic::dec(&_allocation_count);
}
|
||||
|
||||
void OopStorage::release(const oop* const* ptrs, size_t size) {
|
||||
size_t i = 0;
|
||||
while (i < size) {
|
||||
check_release_entry(ptrs[i]);
|
||||
Block* block = find_block_or_null(ptrs[i]);
|
||||
check_release(block, ptrs[i]);
|
||||
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
|
||||
size_t count = 0;
|
||||
uintx releasing = 0;
|
||||
for ( ; i < size; ++i) {
|
||||
const oop* entry = ptrs[i];
|
||||
// If entry not in block, finish block and resume outer loop with entry.
|
||||
if (!block->contains(entry)) break;
|
||||
check_release_entry(entry);
|
||||
// Add entry to releasing bitmap.
|
||||
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
|
||||
uintx entry_bitmask = block->bitmask_for_entry(entry);
|
||||
assert((releasing & entry_bitmask) == 0,
|
||||
"Duplicate entry: " PTR_FORMAT, p2i(entry));
|
||||
releasing |= entry_bitmask;
|
||||
++count;
|
||||
}
|
||||
// Release the contiguous entries that are in block.
|
||||
release_from_block(*block, releasing);
|
||||
Atomic::sub(count, &_allocation_count);
|
||||
}
|
||||
}
|
||||
|
||||
const char* dup_name(const char* name) {
|
||||
char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
|
||||
strcpy(dup, name);
|
||||
return dup;
|
||||
}
|
||||
|
||||
// Construct an empty storage.
// name           - label used in logging and printing (copied).
// allocate_mutex - guards _allocate_list and block creation/deletion.
// active_mutex   - guards _active_list against concurrent iteration;
//                  must rank below allocate_mutex (see asserts).
OopStorage::OopStorage(const char* name,
                       Mutex* allocate_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_list(&Block::get_active_entry),
  _allocate_list(&Block::get_allocate_entry),
  _active_head(NULL),
  _allocate_mutex(allocate_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _block_count(0),
  _empty_block_count(0),
  _concurrent_iteration_active(false)
{
  // delete_empty_blocks_concurrent acquires active_mutex while holding
  // allocate_mutex, so the ranks must permit that nesting.
  assert(_active_mutex->rank() < _allocate_mutex->rank(),
         "%s: active_mutex must have lower rank than allocate_mutex", _name);
  // Both mutexes are taken with _no_safepoint_check_flag throughout.
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocate mutex requires safepoint check", _name);
}
|
||||
|
||||
// Free an empty block that has already been unlinked from all lists.
void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}
|
||||
|
||||
OopStorage::~OopStorage() {
  // Detach every block from the allocate list first; the blocks remain
  // reachable (and are deleted) through the active list below.
  for (Block* block = _allocate_list.head();
       block != NULL;
       block = _allocate_list.head()) {
    _allocate_list.unlink(*block);
  }
  // Unlink and free each block via the active list.
  for (Block* block = _active_list.head();
       block != NULL;
       block = _active_list.head()) {
    _active_list.unlink(*block);
    Block::delete_block(*block);
  }
  FREE_C_HEAP_ARRAY(char, _name); // Free the name copied by dup_name.
}
|
||||
|
||||
// Delete empty blocks down to (at most) retain remaining empties.  Must be
// called at a safepoint; no locking is needed because no mutators run.
void OopStorage::delete_empty_blocks_safepoint(size_t retain) {
  assert_at_safepoint();
  // Don't interfere with a concurrent iteration.
  if (_concurrent_iteration_active) return;
  // Compute the number of blocks to remove, to minimize volatile accesses.
  size_t empty_blocks = _empty_block_count;
  if (retain < empty_blocks) {
    size_t remove_count = empty_blocks - retain;
    // Update volatile counters once.
    _block_count -= remove_count;
    _empty_block_count -= remove_count;
    do {
      // Empty blocks are kept at the tail of _allocate_list (see the
      // allocation protocol comment), so deletion candidates come from there.
      const Block* block = _allocate_list.ctail();
      assert(block != NULL, "invariant");
      assert(block->is_empty(), "invariant");
      // Remove block from lists, and delete it.
      _active_list.unlink(*block);
      _allocate_list.unlink(*block);
      delete_empty_block(*block);
    } while (--remove_count > 0);
    // Update _active_head, in case current value was in deleted set.
    _active_head = _active_list.head();
  }
}
|
||||
|
||||
// Delete empty blocks down to (at most) retain remaining empties, while
// mutators may be running.  Holds _allocate_mutex except while actually
// freeing each block; work is bounded so concurrent releases cannot keep
// this loop alive indefinitely.
void OopStorage::delete_empty_blocks_concurrent(size_t retain) {
  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
  // Other threads could be adding to the empty block count while we
  // release the mutex across the block deletions. Set an upper bound
  // on how many blocks we'll try to release, so other threads can't
  // cause an unbounded stay in this function.
  if (_empty_block_count <= retain) return;
  size_t limit = _empty_block_count - retain;
  for (size_t i = 0; (i < limit) && (retain < _empty_block_count); ++i) {
    // Empty blocks live at the tail of _allocate_list.
    const Block* block = _allocate_list.ctail();
    assert(block != NULL, "invariant");
    assert(block->is_empty(), "invariant");
    {
      MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
      // Don't interfere with a concurrent iteration.
      if (_concurrent_iteration_active) return;
      // Remove block from _active_list, updating head if needed.
      _active_list.unlink(*block);
      --_block_count;
      if (block == _active_head) {
        _active_head = _active_list.head();
      }
    }
    // Remove block from _allocate_list and delete it.
    _allocate_list.unlink(*block);
    --_empty_block_count;
    // Release mutex while deleting block.
    MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
    delete_empty_block(*block);
  }
}
|
||||
|
||||
// Classify ptr relative to block while holding _allocate_mutex: not a valid
// block of this storage -> INVALID_ENTRY; otherwise ALLOCATED_ENTRY or
// UNALLOCATED_ENTRY according to the block's allocated bitmask.
OopStorage::EntryStatus
OopStorage::allocation_status_validating_block(const Block* block,
                                               const oop* ptr) const {
  MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
  if ((block == NULL) || !is_valid_block_locked_or_safepoint(block)) {
    return INVALID_ENTRY;
  }
  bool allocated = (block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0;
  return allocated ? ALLOCATED_ENTRY : UNALLOCATED_ENTRY;
}
|
||||
|
||||
// Public query: classify ptr after locating (and validating) its block.
// Locks _allocate_mutex.  precondition: ptr != NULL.
OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
  return allocation_status_validating_block(find_block_or_null(ptr), ptr);
}
|
||||
|
||||
// Counter accessors.  All are racy unless called at a safepoint.

// Entries currently allocated and not yet released.
size_t OopStorage::allocation_count() const {
  return _allocation_count;
}

// Total blocks owned by this storage.
size_t OopStorage::block_count() const {
  return _block_count;
}

// Blocks with no allocated entries (deletion candidates).
size_t OopStorage::empty_block_count() const {
  return _empty_block_count;
}
|
||||
|
||||
size_t OopStorage::total_memory_usage() const {
|
||||
size_t total_size = sizeof(OopStorage);
|
||||
total_size += strlen(name()) + 1;
|
||||
total_size += block_count() * Block::allocation_size();
|
||||
return total_size;
|
||||
}
|
||||
|
||||
// Parallel iteration support
|
||||
#if INCLUDE_ALL_GCS
|
||||
|
||||
// Sentinel for _next_block meaning "iteration not yet started".  Uses the
// address of a file-local dummy so it can never equal a real Block*.
static char* not_started_marker_dummy = NULL;
static void* const not_started_marker = &not_started_marker_dummy;
|
||||
|
||||
// Shared state for one parallel iteration over storage's blocks.
// concurrent - true when mutators may run during the iteration; then the
//              storage's _concurrent_iteration_active flag is set for the
//              lifetime of this state (blocking empty-block deletion).
OopStorage::BasicParState::BasicParState(OopStorage* storage, bool concurrent) :
  _storage(storage),
  _next_block(not_started_marker),
  _concurrent(concurrent)
{
  update_iteration_state(true);
}

OopStorage::BasicParState::~BasicParState() {
  // Clear the concurrent-iteration flag set by the constructor.
  update_iteration_state(false);
}
|
||||
|
||||
void OopStorage::BasicParState::update_iteration_state(bool value) {
|
||||
if (_concurrent) {
|
||||
MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
|
||||
assert(_storage->_concurrent_iteration_active != value, "precondition");
|
||||
_storage->_concurrent_iteration_active = value;
|
||||
}
|
||||
}
|
||||
|
||||
// Lazily start the iteration: the first worker to get here swings
// _next_block from the not-started sentinel to the storage's active head.
// Safe to call from multiple workers; losers of the CAS simply observe the
// already-started state.
void OopStorage::BasicParState::ensure_iteration_started() {
  if (!_concurrent) assert_at_safepoint();
  assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
  // Ensure _next_block is not the not_started_marker, setting it to
  // the _active_head to start the iteration if necessary.
  if (OrderAccess::load_acquire(&_next_block) == not_started_marker) {
    Atomic::cmpxchg(_storage->_active_head, &_next_block, not_started_marker);
  }
  assert(_next_block != not_started_marker, "postcondition");
}
|
||||
|
||||
// Atomically claim the next unprocessed block for the calling worker, or
// return NULL when iteration is exhausted.  Workers race on _next_block
// with CAS; the winner advances it to the following active-list block.
OopStorage::Block* OopStorage::BasicParState::claim_next_block() {
  assert(_next_block != not_started_marker, "Iteration not started");
  void* next = _next_block;
  while (next != NULL) {
    void* new_next = _storage->_active_list.next(*static_cast<Block*>(next));
    void* fetched = Atomic::cmpxchg(new_next, &_next_block, next);
    if (fetched == next) break; // Claimed.
    next = fetched; // Lost the race; try to claim the newer value.
  }
  return static_cast<Block*>(next);
}
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Storage label, as copied from the constructor argument.
const char* OopStorage::name() const { return _name; }
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Print a one-line summary of this storage's usage to st (non-product only).
void OopStorage::print_on(outputStream* st) const {
  // Snapshot the counters once; no lock is taken, so the values may be
  // mutually inconsistent.
  size_t allocations = _allocation_count;
  size_t blocks = _block_count;
  size_t empties = _empty_block_count;
  // Comparison is being careful about racy accesses.
  size_t used = (blocks < empties) ? 0 : (blocks - empties);

  // Entries per block = section_size * section_count.
  double data_size = section_size * section_count;
  double alloc_percentage = percent_of((double)allocations, used * data_size);

  st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), "
            SIZE_FORMAT " empties, " SIZE_FORMAT " bytes",
            name(), allocations, used, alloc_percentage,
            empties, total_memory_usage());
  if (_concurrent_iteration_active) {
    st->print(", concurrent iteration active");
  }
}
|
||||
|
||||
#endif // !PRODUCT
|
279
src/hotspot/share/gc/shared/oopStorage.hpp
Normal file
279
src/hotspot/share/gc/shared/oopStorage.hpp
Normal file
@ -0,0 +1,279 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
|
||||
#define SHARE_GC_SHARED_OOPSTORAGE_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
class Mutex;
|
||||
class outputStream;
|
||||
|
||||
// OopStorage supports management of off-heap references to objects allocated
|
||||
// in the Java heap. An OopStorage object provides a set of Java object
|
||||
// references (oop values), which clients refer to via oop* handles to the
|
||||
// associated OopStorage entries. Clients allocate entries to create a
|
||||
// (possibly weak) reference to a Java object, use that reference, and release
|
||||
// the reference when no longer needed.
|
||||
//
|
||||
// The garbage collector must know about all OopStorage objects and their
|
||||
// reference strength. OopStorage provides the garbage collector with support
|
||||
// for iteration over all the allocated entries.
|
||||
//
|
||||
// There are several categories of interaction with an OopStorage object.
|
||||
//
|
||||
// (1) allocation and release of entries, by the mutator or the VM.
|
||||
// (2) iteration by the garbage collector, possibly concurrent with mutator.
|
||||
// (3) iteration by other, non-GC, tools (only at safepoints).
|
||||
// (4) cleanup of unused internal storage, possibly concurrent with mutator.
|
||||
//
|
||||
// A goal of OopStorage is to make these interactions thread-safe, while
|
||||
// minimizing potential lock contention issues within and between these
|
||||
// categories. In particular, support for concurrent iteration by the garbage
|
||||
// collector, under certain restrictions, is required. Further, it must not
|
||||
// block nor be blocked by other operations for long periods.
|
||||
//
|
||||
// Internally, OopStorage is a set of Block objects, from which entries are
|
||||
// allocated and released. A block contains an oop[] and a bitmask indicating
|
||||
// which entries are in use (have been allocated and not yet released). New
|
||||
// blocks are constructed and added to the storage object when an entry
|
||||
// allocation request is made and there are no blocks with unused entries.
|
||||
// Blocks may be removed and deleted when empty.
|
||||
//
|
||||
// There are two important (and somewhat intertwined) protocols governing
|
||||
// concurrent access to a storage object. These are the Concurrent Iteration
|
||||
// Protocol and the Allocation Protocol. See the ParState class for a
|
||||
// discussion of concurrent iteration and the management of thread
|
||||
// interactions for this protocol. Similarly, see the allocate() function for
|
||||
// a discussion of allocation.
|
||||
|
||||
class OopStorage : public CHeapObj<mtGC> {
|
||||
public:
|
||||
OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
|
||||
~OopStorage();
|
||||
|
||||
// These count and usage accessors are racy unless at a safepoint.
|
||||
|
||||
// The number of allocated and not yet released entries.
|
||||
size_t allocation_count() const;
|
||||
|
||||
// The number of blocks of entries. Useful for sizing parallel iteration.
|
||||
size_t block_count() const;
|
||||
|
||||
// The number of blocks with no allocated entries. Useful for sizing
|
||||
// parallel iteration and scheduling block deletion.
|
||||
size_t empty_block_count() const;
|
||||
|
||||
// Total number of blocks * memory allocation per block, plus
|
||||
// bookkeeping overhead, including this storage object.
|
||||
size_t total_memory_usage() const;
|
||||
|
||||
enum EntryStatus {
|
||||
INVALID_ENTRY,
|
||||
UNALLOCATED_ENTRY,
|
||||
ALLOCATED_ENTRY
|
||||
};
|
||||
|
||||
// Locks _allocate_mutex.
|
||||
// precondition: ptr != NULL.
|
||||
EntryStatus allocation_status(const oop* ptr) const;
|
||||
|
||||
// Allocates and returns a new entry. Returns NULL if memory allocation
|
||||
// failed. Locks _allocate_mutex.
|
||||
// postcondition: *result == NULL.
|
||||
oop* allocate();
|
||||
|
||||
// Deallocates ptr, after setting its value to NULL. Locks _allocate_mutex.
|
||||
// precondition: ptr is a valid allocated entry.
|
||||
// precondition: *ptr == NULL.
|
||||
void release(const oop* ptr);
|
||||
|
||||
// Releases all the ptrs. Possibly faster than individual calls to
|
||||
// release(oop*). Best if ptrs is sorted by address. Locks
|
||||
// _allocate_mutex.
|
||||
// precondition: All elements of ptrs are valid allocated entries.
|
||||
// precondition: *ptrs[i] == NULL, for i in [0,size).
|
||||
void release(const oop* const* ptrs, size_t size);
|
||||
|
||||
// Applies f to each allocated entry's location. f must be a function or
|
||||
// function object. Assume p is either a const oop* or an oop*, depending
|
||||
// on whether the associated storage is const or non-const, respectively.
|
||||
// Then f(p) must be a valid expression. The result of invoking f(p) must
|
||||
// be implicitly convertible to bool. Iteration terminates and returns
|
||||
// false if any invocation of f returns false. Otherwise, the result of
|
||||
// iteration is true.
|
||||
// precondition: at safepoint.
|
||||
template<typename F> inline bool iterate_safepoint(F f);
|
||||
template<typename F> inline bool iterate_safepoint(F f) const;
|
||||
|
||||
// oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
|
||||
// an adaptation layer allowing the use of existing is-alive closures and
|
||||
// OopClosures. Assume p is either const oop* or oop*, depending on whether
|
||||
// the associated storage is const or non-const, respectively. Then
|
||||
//
|
||||
// - closure->do_oop(p) must be a valid expression whose value is ignored.
|
||||
//
|
||||
// - is_alive->do_object_b(*p) must be a valid expression whose value is
|
||||
// convertible to bool.
|
||||
//
|
||||
// For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
|
||||
// invoked for p. If is_alive->do_object_b(*p) is false, then closure will
|
||||
// not be invoked on p, and *p will be set to NULL.
|
||||
|
||||
template<typename Closure> inline void oops_do(Closure* closure);
|
||||
template<typename Closure> inline void oops_do(Closure* closure) const;
|
||||
template<typename Closure> inline void weak_oops_do(Closure* closure);
|
||||
|
||||
template<typename IsAliveClosure, typename Closure>
|
||||
inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel iteration is for the exclusive use of the GC.
|
||||
// Other clients must use serial iteration.
|
||||
template<bool concurrent, bool is_const> class ParState;
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Block cleanup functions are for the exclusive use of the GC.
|
||||
// Both stop deleting if there is an in-progress concurrent iteration.
|
||||
// Concurrent deletion locks both the allocate_mutex and the active_mutex.
|
||||
void delete_empty_blocks_safepoint(size_t retain = 1);
|
||||
void delete_empty_blocks_concurrent(size_t retain = 1);
|
||||
|
||||
// Debugging and logging support.
|
||||
const char* name() const;
|
||||
void print_on(outputStream* st) const PRODUCT_RETURN;
|
||||
|
||||
// Provides access to storage internals, for unit testing.
|
||||
// Declare, but not define, the public class OopStorage::TestAccess.
|
||||
// That class is defined as part of the unit-test. It "exports" the needed
|
||||
// private types by providing public typedefs for them.
|
||||
class TestAccess;
|
||||
|
||||
// xlC on AIX can't compile test_oopStorage.cpp with following private
|
||||
// classes. C++03 introduced access for nested classes with DR45, but xlC
|
||||
// version 12 rejects it.
|
||||
NOT_AIX( private: )
|
||||
class Block; // Forward decl; defined in .inline.hpp file.
|
||||
class BlockList; // Forward decl for BlockEntry friend decl.
|
||||
|
||||
class BlockEntry VALUE_OBJ_CLASS_SPEC {
|
||||
friend class BlockList;
|
||||
|
||||
// Members are mutable, and we deal exclusively with pointers to
|
||||
// const, to make const blocks easier to use; a block being const
|
||||
// doesn't prevent modifying its list state.
|
||||
mutable const Block* _prev;
|
||||
mutable const Block* _next;
|
||||
|
||||
// Noncopyable.
|
||||
BlockEntry(const BlockEntry&);
|
||||
BlockEntry& operator=(const BlockEntry&);
|
||||
|
||||
public:
|
||||
BlockEntry();
|
||||
~BlockEntry();
|
||||
};
|
||||
|
||||
class BlockList VALUE_OBJ_CLASS_SPEC {
|
||||
const Block* _head;
|
||||
const Block* _tail;
|
||||
const BlockEntry& (*_get_entry)(const Block& block);
|
||||
|
||||
// Noncopyable.
|
||||
BlockList(const BlockList&);
|
||||
BlockList& operator=(const BlockList&);
|
||||
|
||||
public:
|
||||
BlockList(const BlockEntry& (*get_entry)(const Block& block));
|
||||
~BlockList();
|
||||
|
||||
Block* head();
|
||||
const Block* chead() const;
|
||||
const Block* ctail() const;
|
||||
|
||||
Block* prev(Block& block);
|
||||
Block* next(Block& block);
|
||||
|
||||
const Block* prev(const Block& block) const;
|
||||
const Block* next(const Block& block) const;
|
||||
|
||||
void push_front(const Block& block);
|
||||
void push_back(const Block& block);
|
||||
void unlink(const Block& block);
|
||||
};
|
||||
|
||||
private:
|
||||
const char* _name;
|
||||
BlockList _active_list;
|
||||
BlockList _allocate_list;
|
||||
Block* volatile _active_head;
|
||||
|
||||
Mutex* _allocate_mutex;
|
||||
Mutex* _active_mutex;
|
||||
|
||||
// Counts are volatile for racy unlocked accesses.
|
||||
volatile size_t _allocation_count;
|
||||
volatile size_t _block_count;
|
||||
volatile size_t _empty_block_count;
|
||||
// mutable because this gets set even for const iteration.
|
||||
mutable bool _concurrent_iteration_active;
|
||||
|
||||
Block* find_block_or_null(const oop* ptr) const;
|
||||
bool is_valid_block_locked_or_safepoint(const Block* block) const;
|
||||
EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
|
||||
void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
|
||||
void release_from_block(Block& block, uintx release_bitmask);
|
||||
void delete_empty_block(const Block& block);
|
||||
|
||||
static void assert_at_safepoint() NOT_DEBUG_RETURN;
|
||||
|
||||
template<typename F, typename Storage>
|
||||
static bool iterate_impl(F f, Storage* storage);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Implementation support for parallel iteration
|
||||
class BasicParState;
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Wrapper for OopClosure-style function, so it can be used with
|
||||
// iterate. Assume p is of type oop*. Then cl->do_oop(p) must be a
|
||||
// valid expression whose value may be ignored.
|
||||
template<typename Closure> class OopFn;
|
||||
template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);
|
||||
|
||||
// Wrapper for BoolObjectClosure + iteration handler pair, so they
|
||||
// can be used with iterate.
|
||||
template<typename IsAlive, typename F> class IfAliveFn;
|
||||
template<typename IsAlive, typename F>
|
||||
static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);
|
||||
|
||||
// Wrapper for iteration handler, automatically skipping NULL entries.
|
||||
template<typename F> class SkipNullFn;
|
||||
template<typename F> static SkipNullFn<F> skip_null_fn(F f);
|
||||
};
|
||||
|
||||
#endif // include guard
|
289
src/hotspot/share/gc/shared/oopStorage.inline.hpp
Normal file
289
src/hotspot/share/gc/shared/oopStorage.inline.hpp
Normal file
@ -0,0 +1,289 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
|
||||
#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
|
||||
|
||||
#include "gc/shared/oopStorage.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "metaprogramming/conditional.hpp"
|
||||
#include "metaprogramming/isConst.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "utilities/count_trailing_zeros.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
|
||||
// _data must be the first non-static data member, for alignment.
|
||||
oop _data[BitsPerWord];
|
||||
static const unsigned _data_pos = 0; // Position of _data.
|
||||
|
||||
volatile uintx _allocated_bitmask; // One bit per _data element.
|
||||
const OopStorage* _owner;
|
||||
void* _memory; // Unaligned storage containing block.
|
||||
BlockEntry _active_entry;
|
||||
BlockEntry _allocate_entry;
|
||||
|
||||
Block(const OopStorage* owner, void* memory);
|
||||
~Block();
|
||||
|
||||
void check_index(unsigned index) const;
|
||||
unsigned get_index(const oop* ptr) const;
|
||||
|
||||
template<typename F, typename BlockPtr>
|
||||
static bool iterate_impl(F f, BlockPtr b);
|
||||
|
||||
// Noncopyable.
|
||||
Block(const Block&);
|
||||
Block& operator=(const Block&);
|
||||
|
||||
public:
|
||||
static const BlockEntry& get_active_entry(const Block& block);
|
||||
static const BlockEntry& get_allocate_entry(const Block& block);
|
||||
|
||||
static size_t allocation_size();
|
||||
static size_t allocation_alignment_shift();
|
||||
|
||||
oop* get_pointer(unsigned index);
|
||||
const oop* get_pointer(unsigned index) const;
|
||||
|
||||
uintx bitmask_for_index(unsigned index) const;
|
||||
uintx bitmask_for_entry(const oop* ptr) const;
|
||||
|
||||
// Allocation bitmask accessors are racy.
|
||||
bool is_full() const;
|
||||
bool is_empty() const;
|
||||
uintx allocated_bitmask() const;
|
||||
uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);
|
||||
|
||||
bool contains(const oop* ptr) const;
|
||||
|
||||
// Returns NULL if ptr is not in a block or not allocated in that block.
|
||||
static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
|
||||
|
||||
oop* allocate();
|
||||
static Block* new_block(const OopStorage* owner);
|
||||
static void delete_block(const Block& block);
|
||||
|
||||
template<typename F> bool iterate(F f);
|
||||
template<typename F> bool iterate(F f) const;
|
||||
}; // class Block
|
||||
|
||||
inline OopStorage::Block* OopStorage::BlockList::head() {
|
||||
return const_cast<Block*>(_head);
|
||||
}
|
||||
|
||||
inline const OopStorage::Block* OopStorage::BlockList::chead() const {
|
||||
return _head;
|
||||
}
|
||||
|
||||
inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
|
||||
return _tail;
|
||||
}
|
||||
|
||||
inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
|
||||
return const_cast<Block*>(_get_entry(block)._prev);
|
||||
}
|
||||
|
||||
inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
|
||||
return const_cast<Block*>(_get_entry(block)._next);
|
||||
}
|
||||
|
||||
inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
|
||||
return _get_entry(block)._prev;
|
||||
}
|
||||
|
||||
inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
|
||||
return _get_entry(block)._next;
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
explicit OopFn(Closure* cl) : _cl(cl) {}
|
||||
|
||||
template<typename OopPtr> // [const] oop*
|
||||
bool operator()(OopPtr ptr) const {
|
||||
_cl->do_oop(ptr);
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
Closure* _cl;
|
||||
};
|
||||
|
||||
template<typename Closure>
|
||||
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
|
||||
return OopFn<Closure>(cl);
|
||||
}
|
||||
|
||||
template<typename IsAlive, typename F>
|
||||
class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
|
||||
|
||||
bool operator()(oop* ptr) const {
|
||||
bool result = true;
|
||||
oop v = *ptr;
|
||||
if (v != NULL) {
|
||||
if (_is_alive->do_object_b(v)) {
|
||||
result = _f(ptr);
|
||||
} else {
|
||||
*ptr = NULL; // Clear dead value.
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private:
|
||||
IsAlive* _is_alive;
|
||||
F _f;
|
||||
};
|
||||
|
||||
template<typename IsAlive, typename F>
|
||||
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
|
||||
return IfAliveFn<IsAlive, F>(is_alive, f);
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
|
||||
public:
|
||||
SkipNullFn(F f) : _f(f) {}
|
||||
|
||||
template<typename OopPtr> // [const] oop*
|
||||
bool operator()(OopPtr ptr) const {
|
||||
return (*ptr != NULL) ? _f(ptr) : true;
|
||||
}
|
||||
|
||||
private:
|
||||
F _f;
|
||||
};
|
||||
|
||||
template<typename F>
|
||||
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
|
||||
return SkipNullFn<F>(f);
|
||||
}
|
||||
|
||||
// Inline Block accesses for use in iteration inner loop.
|
||||
|
||||
inline void OopStorage::Block::check_index(unsigned index) const {
|
||||
assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
|
||||
}
|
||||
|
||||
inline oop* OopStorage::Block::get_pointer(unsigned index) {
|
||||
check_index(index);
|
||||
return &_data[index];
|
||||
}
|
||||
|
||||
inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
|
||||
check_index(index);
|
||||
return &_data[index];
|
||||
}
|
||||
|
||||
inline uintx OopStorage::Block::allocated_bitmask() const {
|
||||
return _allocated_bitmask;
|
||||
}
|
||||
|
||||
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
|
||||
check_index(index);
|
||||
return uintx(1) << index;
|
||||
}
|
||||
|
||||
// Provide const or non-const iteration, depending on whether BlockPtr
|
||||
// is const Block* or Block*, respectively.
|
||||
template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
|
||||
inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
|
||||
uintx bitmask = block->allocated_bitmask();
|
||||
while (bitmask != 0) {
|
||||
unsigned index = count_trailing_zeros(bitmask);
|
||||
bitmask ^= block->bitmask_for_index(index);
|
||||
if (!f(block->get_pointer(index))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
inline bool OopStorage::Block::iterate(F f) {
|
||||
return iterate_impl(f, this);
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
inline bool OopStorage::Block::iterate(F f) const {
|
||||
return iterate_impl(f, this);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// Support for serial iteration, always at a safepoint.
|
||||
|
||||
// Provide const or non-const iteration, depending on whether Storage is
|
||||
// const OopStorage* or OopStorage*, respectively.
|
||||
template<typename F, typename Storage> // Storage := [const] OopStorage
|
||||
inline bool OopStorage::iterate_impl(F f, Storage* storage) {
|
||||
assert_at_safepoint();
|
||||
// Propagate const/non-const iteration to the block layer, by using
|
||||
// const or non-const blocks as corresponding to Storage.
|
||||
typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
|
||||
for (BlockPtr block = storage->_active_head;
|
||||
block != NULL;
|
||||
block = storage->_active_list.next(*block)) {
|
||||
if (!block->iterate(f)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
inline bool OopStorage::iterate_safepoint(F f) {
|
||||
return iterate_impl(f, this);
|
||||
}
|
||||
|
||||
template<typename F>
|
||||
inline bool OopStorage::iterate_safepoint(F f) const {
|
||||
return iterate_impl(f, this);
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
inline void OopStorage::oops_do(Closure* cl) {
|
||||
iterate_safepoint(oop_fn(cl));
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
inline void OopStorage::oops_do(Closure* cl) const {
|
||||
iterate_safepoint(oop_fn(cl));
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
inline void OopStorage::weak_oops_do(Closure* cl) {
|
||||
iterate_safepoint(skip_null_fn(oop_fn(cl)));
|
||||
}
|
||||
|
||||
template<typename IsAliveClosure, typename Closure>
|
||||
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
|
||||
iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
|
||||
}
|
||||
|
||||
#endif // include guard
|
239
src/hotspot/share/gc/shared/oopStorageParState.inline.hpp
Normal file
239
src/hotspot/share/gc/shared/oopStorageParState.inline.hpp
Normal file
@ -0,0 +1,239 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
|
||||
#define SHARE_GC_SHARED_OOPSTORAGEPARSTATE_INLINE_HPP
|
||||
|
||||
#include "gc/shared/oopStorage.inline.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "metaprogramming/conditional.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// Support for parallel and optionally concurrent state iteration.
|
||||
//
|
||||
// Parallel iteration is for the exclusive use of the GC. Other iteration
|
||||
// clients must use serial iteration.
|
||||
//
|
||||
// Concurrent Iteration
|
||||
//
|
||||
// Iteration involves the _active_list, which contains all of the blocks owned
|
||||
// by a storage object. This is a doubly-linked list, linked through
|
||||
// dedicated fields in the blocks.
|
||||
//
|
||||
// At most one concurrent ParState can exist at a time for a given storage
|
||||
// object.
|
||||
//
|
||||
// A concurrent ParState sets the associated storage's
|
||||
// _concurrent_iteration_active flag true when the state is constructed, and
|
||||
// sets it false when the state is destroyed. These assignments are made with
|
||||
// _active_mutex locked. Meanwhile, empty block deletion is not done while
|
||||
// _concurrent_iteration_active is true. The flag check and the dependent
|
||||
// removal of a block from the _active_list is performed with _active_mutex
|
||||
// locked. This prevents concurrent iteration and empty block deletion from
|
||||
// interfering with with each other.
|
||||
//
|
||||
// Both allocate() and delete_empty_blocks_concurrent() lock the
|
||||
// _allocate_mutex while performing their respective list manipulations,
|
||||
// preventing them from interfering with each other.
|
||||
//
|
||||
// When allocate() creates a new block, it is added to the front of the
|
||||
// _active_list. Then _active_head is set to the new block. When concurrent
|
||||
// iteration is started (by a parallel worker thread calling the state's
|
||||
// iterate() function), the current _active_head is used as the initial block
|
||||
// for the iteration, with iteration proceeding down the list headed by that
|
||||
// block.
|
||||
//
|
||||
// As a result, the list over which concurrent iteration operates is stable.
|
||||
// However, once the iteration is started, later allocations may add blocks to
|
||||
// the front of the list that won't be examined by the iteration. And while
|
||||
// the list is stable, concurrent allocate() and release() operations may
|
||||
// change the set of allocated entries in a block at any time during the
|
||||
// iteration.
|
||||
//
|
||||
// As a result, a concurrent iteration handler must accept that some
|
||||
// allocations and releases that occur after the iteration started will not be
|
||||
// seen by the iteration. Further, some may overlap examination by the
|
||||
// iteration. To help with this, allocate() and release() have an invariant
|
||||
// that an entry's value must be NULL when it is not in use.
|
||||
//
|
||||
// An in-progress delete_empty_blocks_concurrent() operation can contend with
|
||||
// the start of a concurrent iteration over the _active_mutex. Since both are
|
||||
// under GC control, that potential contention can be eliminated by never
|
||||
// scheduling both operations to run at the same time.
|
||||
//
|
||||
// ParState<concurrent, is_const>
|
||||
// concurrent must be true if iteration is concurrent with the
|
||||
// mutator, false if iteration is at a safepoint.
|
||||
//
|
||||
// is_const must be true if the iteration is over a constant storage
|
||||
// object, false if the iteration may modify the storage object.
|
||||
//
|
||||
// ParState([const] OopStorage* storage)
|
||||
// Construct an object for managing an iteration over storage. For a
|
||||
// concurrent ParState, empty block deletion for the associated storage
|
||||
// is inhibited for the life of the ParState. There can be no more
|
||||
// than one live concurrent ParState at a time for a given storage object.
|
||||
//
|
||||
// template<typename F> void iterate(F f)
|
||||
// Repeatedly claims a block from the associated storage that has
|
||||
// not been processed by this iteration (possibly by other threads),
|
||||
// and applies f to each entry in the claimed block. Assume p is of
|
||||
// type const oop* or oop*, according to is_const. Then f(p) must be
|
||||
// a valid expression whose value is ignored. Concurrent uses must
|
||||
// be prepared for an entry's value to change at any time, due to
|
||||
// mutator activity.
|
||||
//
|
||||
// template<typename Closure> void oops_do(Closure* cl)
|
||||
// Wrapper around iterate, providing an adaptation layer allowing
|
||||
// the use of OopClosures and similar objects for iteration. Assume
|
||||
// p is of type const oop* or oop*, according to is_const. Then
|
||||
// cl->do_oop(p) must be a valid expression whose value is ignored.
|
||||
// Concurrent uses must be prepared for the entry's value to change
|
||||
// at any time, due to mutator activity.
|
||||
//
|
||||
// Optional operations, provided only if !concurrent && !is_const.
|
||||
// These are not provided when is_const, because the storage object
|
||||
// may be modified by the iteration infrastructure, even if the
|
||||
// provided closure doesn't modify the storage object. These are not
|
||||
// provided when concurrent because any pre-filtering behavior by the
|
||||
// iteration infrastructure is inappropriate for concurrent iteration;
|
||||
// modifications of the storage by the mutator could result in the
|
||||
// pre-filtering being applied (successfully or not) to objects that
|
||||
// are unrelated to what the closure finds in the entry.
|
||||
//
|
||||
// template<typename Closure> void weak_oops_do(Closure* cl)
|
||||
// template<typename IsAliveClosure, typename Closure>
|
||||
// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
|
||||
// Wrappers around iterate, providing an adaptation layer allowing
|
||||
// the use of is-alive closures and OopClosures for iteration.
|
||||
// Assume p is of type oop*. Then
|
||||
//
|
||||
// - cl->do_oop(p) must be a valid expression whose value is ignored.
|
||||
//
|
||||
// - is_alive->do_object_b(*p) must be a valid expression whose value
|
||||
// is convertible to bool.
|
||||
//
|
||||
// If *p == NULL then neither is_alive nor cl will be invoked for p.
|
||||
// If is_alive->do_object_b(*p) is false, then cl will not be
|
||||
// invoked on p.
|
||||
|
||||
class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
|
||||
OopStorage* _storage;
|
||||
void* volatile _next_block;
|
||||
bool _concurrent;
|
||||
|
||||
// Noncopyable.
|
||||
BasicParState(const BasicParState&);
|
||||
BasicParState& operator=(const BasicParState&);
|
||||
|
||||
void update_iteration_state(bool value);
|
||||
void ensure_iteration_started();
|
||||
Block* claim_next_block();
|
||||
|
||||
// Wrapper for iteration handler; ignore handler result and return true.
|
||||
template<typename F> class AlwaysTrueFn;
|
||||
|
||||
public:
|
||||
BasicParState(OopStorage* storage, bool concurrent);
|
||||
~BasicParState();
|
||||
|
||||
template<bool is_const, typename F> void iterate(F f) {
|
||||
// Wrap f in ATF so we can use Block::iterate.
|
||||
AlwaysTrueFn<F> atf_f(f);
|
||||
ensure_iteration_started();
|
||||
typename Conditional<is_const, const Block*, Block*>::type block;
|
||||
while ((block = claim_next_block()) != NULL) {
|
||||
block->iterate(atf_f);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template<typename F>
|
||||
class OopStorage::BasicParState::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
|
||||
F _f;
|
||||
|
||||
public:
|
||||
AlwaysTrueFn(F f) : _f(f) {}
|
||||
|
||||
template<typename OopPtr> // [const] oop*
|
||||
bool operator()(OopPtr ptr) const { _f(ptr); return true; }
|
||||
};
|
||||
|
||||
template<bool concurrent, bool is_const>
|
||||
class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
|
||||
BasicParState _basic_state;
|
||||
|
||||
public:
|
||||
ParState(const OopStorage* storage) :
|
||||
// For simplicity, always recorded as non-const.
|
||||
_basic_state(const_cast<OopStorage*>(storage), concurrent)
|
||||
{}
|
||||
|
||||
template<typename F>
|
||||
void iterate(F f) {
|
||||
_basic_state.template iterate<is_const>(f);
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
void oops_do(Closure* cl) {
|
||||
this->iterate(oop_fn(cl));
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
|
||||
BasicParState _basic_state;
|
||||
|
||||
public:
|
||||
ParState(OopStorage* storage) :
|
||||
_basic_state(storage, false)
|
||||
{}
|
||||
|
||||
template<typename F>
|
||||
void iterate(F f) {
|
||||
_basic_state.template iterate<false>(f);
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
void oops_do(Closure* cl) {
|
||||
this->iterate(oop_fn(cl));
|
||||
}
|
||||
|
||||
template<typename Closure>
|
||||
void weak_oops_do(Closure* cl) {
|
||||
this->iterate(skip_null_fn(oop_fn(cl)));
|
||||
}
|
||||
|
||||
template<typename IsAliveClosure, typename Closure>
|
||||
void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
|
||||
this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
|
||||
}
|
||||
};
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#endif // include guard
|
@ -207,8 +207,7 @@ int Bytecode_loadconstant::pool_index() const {
|
||||
|
||||
BasicType Bytecode_loadconstant::result_type() const {
|
||||
int index = pool_index();
|
||||
constantTag tag = _method->constants()->tag_at(index);
|
||||
return tag.basic_type();
|
||||
return _method->constants()->basic_type_for_constant_at(index);
|
||||
}
|
||||
|
||||
oop Bytecode_loadconstant::resolve_constant(TRAPS) const {
|
||||
@ -217,6 +216,8 @@ oop Bytecode_loadconstant::resolve_constant(TRAPS) const {
|
||||
ConstantPool* constants = _method->constants();
|
||||
if (has_cache_index()) {
|
||||
return constants->resolve_cached_constant_at(index, THREAD);
|
||||
} else if (_method->constants()->tag_at(index).is_dynamic_constant()) {
|
||||
return constants->resolve_possibly_cached_constant_at(index, THREAD);
|
||||
} else {
|
||||
return constants->resolve_constant_at(index, THREAD);
|
||||
}
|
||||
|
@ -2368,6 +2368,30 @@ run:
|
||||
THREAD->set_vm_result(NULL);
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
{
|
||||
oop result = constants->resolved_references()->obj_at(index);
|
||||
if (result == NULL) {
|
||||
CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
|
||||
result = THREAD->vm_result();
|
||||
}
|
||||
VERIFY_OOP(result);
|
||||
|
||||
jvalue value;
|
||||
BasicType type = java_lang_boxing_object::get_value(result, &value);
|
||||
switch (type) {
|
||||
case T_FLOAT: SET_STACK_FLOAT(value.f, 0); break;
|
||||
case T_INT: SET_STACK_INT(value.i, 0); break;
|
||||
case T_SHORT: SET_STACK_INT(value.s, 0); break;
|
||||
case T_BYTE: SET_STACK_INT(value.b, 0); break;
|
||||
case T_CHAR: SET_STACK_INT(value.c, 0); break;
|
||||
case T_BOOLEAN: SET_STACK_INT(value.z, 0); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
|
||||
@ -2387,6 +2411,27 @@ run:
|
||||
case JVM_CONSTANT_Double:
|
||||
SET_STACK_DOUBLE(constants->double_at(index), 1);
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
{
|
||||
oop result = constants->resolved_references()->obj_at(index);
|
||||
if (result == NULL) {
|
||||
CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
|
||||
result = THREAD->vm_result();
|
||||
}
|
||||
VERIFY_OOP(result);
|
||||
|
||||
jvalue value;
|
||||
BasicType type = java_lang_boxing_object::get_value(result, &value);
|
||||
switch (type) {
|
||||
case T_DOUBLE: SET_STACK_DOUBLE(value.d, 1); break;
|
||||
case T_LONG: SET_STACK_LONG(value.j, 1); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
|
||||
@ -2404,7 +2449,7 @@ run:
|
||||
incr = 3;
|
||||
}
|
||||
|
||||
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
|
||||
// We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
|
||||
// This kind of CP cache entry does not need to match the flags byte, because
|
||||
// there is a 1-1 relation between bytecode type and CP entry type.
|
||||
ConstantPool* constants = METHOD->constants();
|
||||
@ -2414,6 +2459,8 @@ run:
|
||||
handle_exception);
|
||||
result = THREAD->vm_result();
|
||||
}
|
||||
if (result == Universe::the_null_sentinel())
|
||||
result = NULL;
|
||||
|
||||
VERIFY_OOP(result);
|
||||
SET_STACK_OBJECT(result, 0);
|
||||
@ -2425,7 +2472,7 @@ run:
|
||||
u4 index = Bytes::get_native_u4(pc+1);
|
||||
ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
|
||||
|
||||
// We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
|
||||
// We are resolved if the resolved_references array contains a non-null object (CallSite, etc.)
|
||||
// This kind of CP cache entry does not need to match the flags byte, because
|
||||
// there is a 1-1 relation between bytecode type and CP entry type.
|
||||
if (! cache->is_resolved((Bytecodes::Code) opcode)) {
|
||||
|
@ -367,6 +367,7 @@ void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st)
|
||||
case JVM_CONSTANT_Fieldref:
|
||||
break;
|
||||
case JVM_CONSTANT_NameAndType:
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
has_klass = false;
|
||||
break;
|
||||
@ -382,7 +383,7 @@ void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st)
|
||||
Symbol* klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
|
||||
st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
|
||||
} else {
|
||||
if (tag.is_invoke_dynamic()) {
|
||||
if (tag.is_dynamic_constant() || tag.is_invoke_dynamic()) {
|
||||
int bsm = constants->invoke_dynamic_bootstrap_method_ref_index_at(i);
|
||||
st->print(" bsm=%d", bsm);
|
||||
}
|
||||
|
@ -118,22 +118,54 @@ IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
|
||||
IRT_END
|
||||
|
||||
IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
|
||||
assert(bytecode == Bytecodes::_fast_aldc ||
|
||||
assert(bytecode == Bytecodes::_ldc ||
|
||||
bytecode == Bytecodes::_ldc_w ||
|
||||
bytecode == Bytecodes::_ldc2_w ||
|
||||
bytecode == Bytecodes::_fast_aldc ||
|
||||
bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
|
||||
ResourceMark rm(thread);
|
||||
const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
|
||||
bytecode == Bytecodes::_fast_aldc_w);
|
||||
LastFrameAccessor last_frame(thread);
|
||||
methodHandle m (thread, last_frame.method());
|
||||
Bytecode_loadconstant ldc(m, last_frame.bci());
|
||||
|
||||
// Double-check the size. (Condy can have any type.)
|
||||
BasicType type = ldc.result_type();
|
||||
switch (type2size[type]) {
|
||||
case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
|
||||
case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
|
||||
// Resolve the constant. This does not do unboxing.
|
||||
// But it does replace Universe::the_null_sentinel by null.
|
||||
oop result = ldc.resolve_constant(CHECK);
|
||||
assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc");
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
// The bytecode wrappers aren't GC-safe so construct a new one
|
||||
Bytecode_loadconstant ldc2(m, last_frame.bci());
|
||||
oop coop = m->constants()->resolved_references()->obj_at(ldc2.cache_index());
|
||||
assert(result == coop, "expected result for assembly code");
|
||||
int rindex = ldc2.cache_index();
|
||||
if (rindex < 0)
|
||||
rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
|
||||
if (rindex >= 0) {
|
||||
oop coop = m->constants()->resolved_references()->obj_at(rindex);
|
||||
oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
|
||||
assert(roop == coop, "expected result for assembly code");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
thread->set_vm_result(result);
|
||||
if (!is_fast_aldc) {
|
||||
// Tell the interpreter how to unbox the primitive.
|
||||
guarantee(java_lang_boxing_object::is_instance(result, type), "");
|
||||
int offset = java_lang_boxing_object::value_offset_in_bytes(type);
|
||||
intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
|
||||
| (offset & ConstantPoolCacheEntry::field_index_mask));
|
||||
thread->set_vm_result_2((Metadata*)flags);
|
||||
}
|
||||
}
|
||||
IRT_END
|
||||
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include "memory/universe.inline.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "oops/objArrayOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
@ -54,7 +55,6 @@
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------------------------------------------------
|
||||
// Implementation of CallInfo
|
||||
|
||||
@ -284,20 +284,32 @@ void LinkInfo::print() {
|
||||
//------------------------------------------------------------------------------------------------------------------------
|
||||
// Klass resolution
|
||||
|
||||
void LinkResolver::check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS) {
|
||||
void LinkResolver::check_klass_accessability(Klass* ref_klass, Klass* sel_klass,
|
||||
bool fold_type_to_class, TRAPS) {
|
||||
Klass* base_klass = sel_klass;
|
||||
if (fold_type_to_class) {
|
||||
if (sel_klass->is_objArray_klass()) {
|
||||
base_klass = ObjArrayKlass::cast(sel_klass)->bottom_klass();
|
||||
}
|
||||
// The element type could be a typeArray - we only need the access
|
||||
// check if it is an reference to another class.
|
||||
if (!base_klass->is_instance_klass()) {
|
||||
return; // no relevant check to do
|
||||
}
|
||||
}
|
||||
Reflection::VerifyClassAccessResults vca_result =
|
||||
Reflection::verify_class_access(ref_klass, InstanceKlass::cast(sel_klass), true);
|
||||
Reflection::verify_class_access(ref_klass, InstanceKlass::cast(base_klass), true);
|
||||
if (vca_result != Reflection::ACCESS_OK) {
|
||||
ResourceMark rm(THREAD);
|
||||
char* msg = Reflection::verify_class_access_msg(ref_klass,
|
||||
InstanceKlass::cast(sel_klass),
|
||||
InstanceKlass::cast(base_klass),
|
||||
vca_result);
|
||||
if (msg == NULL) {
|
||||
Exceptions::fthrow(
|
||||
THREAD_AND_LOCATION,
|
||||
vmSymbols::java_lang_IllegalAccessError(),
|
||||
"failed to access class %s from class %s",
|
||||
sel_klass->external_name(),
|
||||
base_klass->external_name(),
|
||||
ref_klass->external_name());
|
||||
} else {
|
||||
// Use module specific message returned by verify_class_access_msg().
|
||||
@ -1663,31 +1675,6 @@ void LinkResolver::resolve_handle_call(CallInfo& result,
|
||||
result.set_handle(resolved_klass, resolved_method, resolved_appendix, resolved_method_type, CHECK);
|
||||
}
|
||||
|
||||
static void wrap_invokedynamic_exception(TRAPS) {
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
// See the "Linking Exceptions" section for the invokedynamic instruction
|
||||
// in JVMS 6.5.
|
||||
if (PENDING_EXCEPTION->is_a(SystemDictionary::Error_klass())) {
|
||||
// Pass through an Error, including BootstrapMethodError, any other form
|
||||
// of linkage error, or say ThreadDeath/OutOfMemoryError
|
||||
if (TraceMethodHandles) {
|
||||
tty->print_cr("invokedynamic passes through an Error for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
|
||||
PENDING_EXCEPTION->print();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise wrap the exception in a BootstrapMethodError
|
||||
if (TraceMethodHandles) {
|
||||
tty->print_cr("invokedynamic throws BSME for " INTPTR_FORMAT, p2i((void *)PENDING_EXCEPTION));
|
||||
PENDING_EXCEPTION->print();
|
||||
}
|
||||
Handle nested_exception(THREAD, PENDING_EXCEPTION);
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
|
||||
}
|
||||
}
|
||||
|
||||
void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
|
||||
Symbol* method_name = pool->name_ref_at(index);
|
||||
Symbol* method_signature = pool->signature_ref_at(index);
|
||||
@ -1714,7 +1701,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
|
||||
// set the indy_rf flag since any subsequent invokedynamic instruction which shares
|
||||
// this bootstrap method will encounter the resolution of MethodHandleInError.
|
||||
oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
|
||||
wrap_invokedynamic_exception(CHECK);
|
||||
Exceptions::wrap_dynamic_exception(CHECK);
|
||||
assert(bsm_info != NULL, "");
|
||||
// FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic.
|
||||
bootstrap_specifier = Handle(THREAD, bsm_info);
|
||||
@ -1724,7 +1711,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
|
||||
Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
|
||||
Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
|
||||
result.set_handle(method, appendix, method_type, THREAD);
|
||||
wrap_invokedynamic_exception(CHECK);
|
||||
Exceptions::wrap_dynamic_exception(CHECK);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1737,7 +1724,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
|
||||
tty->print(" BSM info: "); bootstrap_specifier->print();
|
||||
}
|
||||
|
||||
resolve_dynamic_call(result, bootstrap_specifier, method_name,
|
||||
resolve_dynamic_call(result, pool_index, bootstrap_specifier, method_name,
|
||||
method_signature, current_klass, THREAD);
|
||||
if (HAS_PENDING_EXCEPTION && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
|
||||
int encoded_index = ResolutionErrorTable::encode_cpcache_index(index);
|
||||
@ -1753,7 +1740,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
|
||||
Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
|
||||
Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
|
||||
result.set_handle(method, appendix, method_type, THREAD);
|
||||
wrap_invokedynamic_exception(CHECK);
|
||||
Exceptions::wrap_dynamic_exception(CHECK);
|
||||
} else {
|
||||
assert(cpce->indy_resolution_failed(), "Resolution failure flag not set");
|
||||
ConstantPool::throw_resolution_error(pool, encoded_index, CHECK);
|
||||
@ -1765,6 +1752,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
|
||||
}
|
||||
|
||||
void LinkResolver::resolve_dynamic_call(CallInfo& result,
|
||||
int pool_index,
|
||||
Handle bootstrap_specifier,
|
||||
Symbol* method_name, Symbol* method_signature,
|
||||
Klass* current_klass,
|
||||
@ -1775,12 +1763,13 @@ void LinkResolver::resolve_dynamic_call(CallInfo& result,
|
||||
Handle resolved_method_type;
|
||||
methodHandle resolved_method =
|
||||
SystemDictionary::find_dynamic_call_site_invoker(current_klass,
|
||||
pool_index,
|
||||
bootstrap_specifier,
|
||||
method_name, method_signature,
|
||||
&resolved_appendix,
|
||||
&resolved_method_type,
|
||||
THREAD);
|
||||
wrap_invokedynamic_exception(CHECK);
|
||||
Exceptions::wrap_dynamic_exception(CHECK);
|
||||
result.set_handle(resolved_method, resolved_appendix, resolved_method_type, THREAD);
|
||||
wrap_invokedynamic_exception(CHECK);
|
||||
Exceptions::wrap_dynamic_exception(CHECK);
|
||||
}
|
||||
|
@ -274,7 +274,16 @@ class LinkResolver: AllStatic {
|
||||
const constantPoolHandle& pool, int index, TRAPS);
|
||||
public:
|
||||
// constant pool resolving
|
||||
static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS);
|
||||
static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass,
|
||||
bool fold_type_to_class, TRAPS);
|
||||
// The optional 'fold_type_to_class' means that a derived type (array)
|
||||
// is first converted to the class it is derived from (element type).
|
||||
// If this element type is not a class, then the check passes quietly.
|
||||
// This is usually what is needed, but a few existing uses might break
|
||||
// if this flag were always turned on. FIXME: See if it can be, always.
|
||||
static void check_klass_accessability(Klass* ref_klass, Klass* sel_klass, TRAPS) {
|
||||
return check_klass_accessability(ref_klass, sel_klass, false, THREAD);
|
||||
}
|
||||
|
||||
// static resolving calls (will not run any Java code);
|
||||
// used only from Bytecode_invoke::static_target
|
||||
@ -306,7 +315,7 @@ class LinkResolver: AllStatic {
|
||||
bool check_null_and_abstract, TRAPS);
|
||||
static void resolve_handle_call (CallInfo& result,
|
||||
const LinkInfo& link_info, TRAPS);
|
||||
static void resolve_dynamic_call (CallInfo& result, Handle bootstrap_specifier,
|
||||
static void resolve_dynamic_call (CallInfo& result, int pool_index, Handle bootstrap_specifier,
|
||||
Symbol* method_name, Symbol* method_signature,
|
||||
Klass* current_klass, TRAPS);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,6 @@
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/rewriter.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/generateOopMap.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
@ -49,7 +48,11 @@ void Rewriter::compute_index_maps() {
|
||||
case JVM_CONSTANT_Methodref : // fall through
|
||||
add_cp_cache_entry(i);
|
||||
break;
|
||||
case JVM_CONSTANT_String:
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
assert(_pool->has_dynamic_constant(), "constant pool's _has_dynamic_constant flag not set");
|
||||
add_resolved_references_entry(i);
|
||||
break;
|
||||
case JVM_CONSTANT_String : // fall through
|
||||
case JVM_CONSTANT_MethodHandle : // fall through
|
||||
case JVM_CONSTANT_MethodType : // fall through
|
||||
add_resolved_references_entry(i);
|
||||
@ -322,7 +325,14 @@ void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
|
||||
address p = bcp + offset;
|
||||
int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
|
||||
constantTag tag = _pool->tag_at(cp_index).value();
|
||||
if (tag.is_method_handle() || tag.is_method_type() || tag.is_string()) {
|
||||
|
||||
if (tag.is_method_handle() ||
|
||||
tag.is_method_type() ||
|
||||
tag.is_string() ||
|
||||
(tag.is_dynamic_constant() &&
|
||||
// keep regular ldc interpreter logic for condy primitives
|
||||
is_reference_type(FieldType::basic_type(_pool->uncached_signature_ref_at(cp_index))))
|
||||
) {
|
||||
int ref_index = cp_entry_to_resolved_references(cp_index);
|
||||
if (is_wide) {
|
||||
(*bcp) = Bytecodes::_fast_aldc_w;
|
||||
@ -556,7 +566,7 @@ void Rewriter::rewrite_bytecodes(TRAPS) {
|
||||
|
||||
void Rewriter::rewrite(InstanceKlass* klass, TRAPS) {
|
||||
if (!DumpSharedSpaces) {
|
||||
assert(!MetaspaceShared::is_in_shared_space(klass), "archive methods must not be rewritten at run time");
|
||||
assert(!klass->is_shared(), "archive methods must not be rewritten at run time");
|
||||
}
|
||||
ResourceMark rm(THREAD);
|
||||
Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
|
||||
|
@ -278,7 +278,7 @@ void TemplateTable::initialize() {
|
||||
def(Bytecodes::_sipush , ubcp|____|____|____, vtos, itos, sipush , _ );
|
||||
def(Bytecodes::_ldc , ubcp|____|clvm|____, vtos, vtos, ldc , false );
|
||||
def(Bytecodes::_ldc_w , ubcp|____|clvm|____, vtos, vtos, ldc , true );
|
||||
def(Bytecodes::_ldc2_w , ubcp|____|____|____, vtos, vtos, ldc2_w , _ );
|
||||
def(Bytecodes::_ldc2_w , ubcp|____|clvm|____, vtos, vtos, ldc2_w , _ );
|
||||
def(Bytecodes::_iload , ubcp|____|clvm|____, vtos, itos, iload , _ );
|
||||
def(Bytecodes::_lload , ubcp|____|____|____, vtos, ltos, lload , _ );
|
||||
def(Bytecodes::_fload , ubcp|____|____|____, vtos, ftos, fload , _ );
|
||||
|
@ -295,6 +295,7 @@ class TemplateTable: AllStatic {
|
||||
static void getstatic(int byte_no);
|
||||
static void putstatic(int byte_no);
|
||||
static void pop_and_check_object(Register obj);
|
||||
static void condy_helper(Label& Done); // shared by ldc instances
|
||||
|
||||
static void _new();
|
||||
static void newarray();
|
||||
|
@ -766,11 +766,10 @@ C2V_END
|
||||
|
||||
C2V_VMENTRY(jboolean, isCompilable,(JNIEnv *, jobject, jobject jvmci_method))
|
||||
methodHandle method = CompilerToVM::asMethod(jvmci_method);
|
||||
// Skip redefined methods
|
||||
if (method->is_old()) {
|
||||
return false;
|
||||
}
|
||||
return !method->is_not_compilable(CompLevel_full_optimization);
|
||||
constantPoolHandle cp = method->constMethod()->constants();
|
||||
assert(!cp.is_null(), "npe");
|
||||
// don't inline method when constant pool contains a CONSTANT_Dynamic
|
||||
return !method->is_not_compilable(CompLevel_full_optimization) && !cp->has_dynamic_constant();
|
||||
C2V_END
|
||||
|
||||
C2V_VMENTRY(jboolean, hasNeverInlineDirective,(JNIEnv *, jobject, jobject jvmci_method))
|
||||
|
@ -119,6 +119,7 @@
|
||||
nonstatic_field(ConstantPool, _tags, Array<u1>*) \
|
||||
nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \
|
||||
nonstatic_field(ConstantPool, _length, int) \
|
||||
nonstatic_field(ConstantPool, _flags, int) \
|
||||
\
|
||||
nonstatic_field(ConstMethod, _constants, ConstantPool*) \
|
||||
nonstatic_field(ConstMethod, _flags, u2) \
|
||||
@ -415,6 +416,7 @@
|
||||
declare_constant(JVM_CONSTANT_UnresolvedClassInError) \
|
||||
declare_constant(JVM_CONSTANT_MethodHandleInError) \
|
||||
declare_constant(JVM_CONSTANT_MethodTypeInError) \
|
||||
declare_constant(JVM_CONSTANT_DynamicInError) \
|
||||
declare_constant(JVM_CONSTANT_InternalMax) \
|
||||
\
|
||||
declare_constant(ArrayData::array_len_off_set) \
|
||||
@ -452,6 +454,7 @@
|
||||
declare_constant(CodeInstaller::INVOKE_INVALID) \
|
||||
\
|
||||
declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \
|
||||
declare_constant(ConstantPool::_has_dynamic_constant) \
|
||||
\
|
||||
declare_constant(ConstMethod::_has_linenumber_table) \
|
||||
declare_constant(ConstMethod::_has_localvariable_table) \
|
||||
|
@ -296,7 +296,7 @@ void LogConfiguration::disable_logging() {
|
||||
notify_update_listeners();
|
||||
}
|
||||
|
||||
void LogConfiguration::configure_stdout(LogLevelType level, bool exact_match, ...) {
|
||||
void LogConfiguration::configure_stdout(LogLevelType level, int exact_match, ...) {
|
||||
size_t i;
|
||||
va_list ap;
|
||||
LogTagLevelExpression expr;
|
||||
|
@ -102,7 +102,7 @@ class LogConfiguration : public AllStatic {
|
||||
// (exact_match=false is the same as "-Xlog:<tags>*=<level>", and exact_match=true is "-Xlog:<tags>=<level>").
|
||||
// Tags should be specified using the LOG_TAGS macro, e.g.
|
||||
// LogConfiguration::configure_stdout(LogLevel::<level>, <true/false>, LOG_TAGS(<tags>));
|
||||
static void configure_stdout(LogLevelType level, bool exact_match, ...);
|
||||
static void configure_stdout(LogLevelType level, int exact_match, ...);
|
||||
|
||||
// Parse command line configuration. Parameter 'opts' is the string immediately following the -Xlog: argument ("gc" for -Xlog:gc).
|
||||
static bool parse_command_line_arguments(const char* opts = "all");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -101,6 +101,7 @@
|
||||
LOG_TAG(objecttagging) \
|
||||
LOG_TAG(obsolete) \
|
||||
LOG_TAG(oopmap) \
|
||||
LOG_TAG(oopstorage) \
|
||||
LOG_TAG(os) \
|
||||
LOG_TAG(pagesize) \
|
||||
LOG_TAG(patch) \
|
||||
|
@ -40,7 +40,7 @@ class LogTagLevelExpression : public StackObj {
|
||||
static const size_t MaxCombinations = 256;
|
||||
|
||||
private:
|
||||
friend void LogConfiguration::configure_stdout(LogLevelType, bool, ...);
|
||||
friend void LogConfiguration::configure_stdout(LogLevelType, int, ...);
|
||||
|
||||
static const char* DefaultExpressionString;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -37,6 +37,9 @@
|
||||
#include "services/memTracker.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
void* MetaspaceObj::_shared_metaspace_base = NULL;
|
||||
void* MetaspaceObj::_shared_metaspace_top = NULL;
|
||||
|
||||
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
|
||||
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
|
||||
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
|
||||
@ -54,10 +57,6 @@ void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
|
||||
return Metaspace::allocate(loader_data, word_size, type, THREAD);
|
||||
}
|
||||
|
||||
bool MetaspaceObj::is_shared() const {
|
||||
return MetaspaceShared::is_in_shared_space(this);
|
||||
}
|
||||
|
||||
bool MetaspaceObj::is_metaspace_object() const {
|
||||
return Metaspace::contains((void*)this);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -227,9 +227,23 @@ class ClassLoaderData;
|
||||
class MetaspaceClosure;
|
||||
|
||||
class MetaspaceObj {
|
||||
friend class MetaspaceShared;
|
||||
// When CDS is enabled, all shared metaspace objects are mapped
|
||||
// into a single contiguous memory block, so we can use these
|
||||
// two pointers to quickly determine if something is in the
|
||||
// shared metaspace.
|
||||
//
|
||||
// When CDS is not enabled, both pointers are set to NULL.
|
||||
static void* _shared_metaspace_base; // (inclusive) low address
|
||||
static void* _shared_metaspace_top; // (exclusive) high address
|
||||
|
||||
public:
|
||||
bool is_metaspace_object() const;
|
||||
bool is_shared() const;
|
||||
bool is_shared() const {
|
||||
// If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will
|
||||
// both be NULL and all values of p will be rejected quickly.
|
||||
return (((void*)this) < _shared_metaspace_top && ((void*)this) >= _shared_metaspace_base);
|
||||
}
|
||||
void print_address_on(outputStream* st) const; // nonvirtual address printing
|
||||
|
||||
#define METASPACE_OBJ_TYPES_DO(f) \
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -659,7 +659,7 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
|
||||
static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "OptionalData",
|
||||
"String1", "String2", "OpenArchive1", "OpenArchive2" };
|
||||
|
||||
char* FileMapInfo::map_region(int i) {
|
||||
char* FileMapInfo::map_region(int i, char** top_ret) {
|
||||
assert(!MetaspaceShared::is_heap_region(i), "sanity");
|
||||
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
|
||||
size_t used = si->_used;
|
||||
@ -686,6 +686,12 @@ char* FileMapInfo::map_region(int i) {
|
||||
MemTracker::record_virtual_memory_type((address)base, mtClassShared);
|
||||
#endif
|
||||
|
||||
|
||||
if (!verify_region_checksum(i)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*top_ret = base + size;
|
||||
return base;
|
||||
}
|
||||
|
||||
@ -1040,27 +1046,6 @@ bool FileMapInfo::validate_header() {
|
||||
return status;
|
||||
}
|
||||
|
||||
// The following method is provided to see whether a given pointer
|
||||
// falls in the mapped shared metadata space.
|
||||
// Param:
|
||||
// p, The given pointer
|
||||
// Return:
|
||||
// True if the p is within the mapped shared space, otherwise, false.
|
||||
bool FileMapInfo::is_in_shared_space(const void* p) {
|
||||
for (int i = 0; i < MetaspaceShared::num_non_heap_spaces; i++) {
|
||||
char *base;
|
||||
if (_header->_space[i]._used == 0) {
|
||||
continue;
|
||||
}
|
||||
base = _header->region_addr(i);
|
||||
if (p >= base && p < base + _header->_space[i]._used) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if a given address is within one of the shared regions
|
||||
bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
|
||||
assert(idx == MetaspaceShared::ro ||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -248,7 +248,7 @@ public:
|
||||
int first_region_id, int max_num_regions);
|
||||
void write_bytes(const void* buffer, int count);
|
||||
void write_bytes_aligned(const void* buffer, int count);
|
||||
char* map_region(int i);
|
||||
char* map_region(int i, char** top_ret);
|
||||
void map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void unmap_region(int i);
|
||||
@ -265,8 +265,6 @@ public:
|
||||
static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
|
||||
static void fail_continue(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
|
||||
|
||||
// Return true if given address is in the mapped shared space.
|
||||
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
|
||||
bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
|
||||
void print_shared_spaces() NOT_CDS_RETURN;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -4070,7 +4070,7 @@ void Metaspace::print_on(outputStream* out) const {
|
||||
}
|
||||
|
||||
bool Metaspace::contains(const void* ptr) {
|
||||
if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
|
||||
if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
|
||||
return true;
|
||||
}
|
||||
return contains_non_shared(ptr);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -746,7 +746,7 @@ void MetaspaceShared::patch_cpp_vtable_pointers() {
|
||||
}
|
||||
|
||||
bool MetaspaceShared::is_valid_shared_method(const Method* m) {
|
||||
assert(is_in_shared_space(m), "must be");
|
||||
assert(is_in_shared_metaspace(m), "must be");
|
||||
return CppVtableCloner<Method>::is_valid_shared_object(m);
|
||||
}
|
||||
|
||||
@ -1819,11 +1819,6 @@ public:
|
||||
bool reading() const { return true; }
|
||||
};
|
||||
|
||||
// Return true if given address is in the mapped shared space.
|
||||
bool MetaspaceShared::is_in_shared_space(const void* p) {
|
||||
return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
|
||||
}
|
||||
|
||||
// Return true if given address is in the misc data region
|
||||
bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
|
||||
return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
|
||||
@ -1857,35 +1852,46 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
|
||||
|
||||
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
|
||||
|
||||
char* _ro_base = NULL;
|
||||
char* _rw_base = NULL;
|
||||
char* _mc_base = NULL;
|
||||
char* _md_base = NULL;
|
||||
char* _od_base = NULL;
|
||||
char* ro_base = NULL; char* ro_top;
|
||||
char* rw_base = NULL; char* rw_top;
|
||||
char* mc_base = NULL; char* mc_top;
|
||||
char* md_base = NULL; char* md_top;
|
||||
char* od_base = NULL; char* od_top;
|
||||
|
||||
// Map each shared region
|
||||
if ((_mc_base = mapinfo->map_region(mc)) != NULL &&
|
||||
mapinfo->verify_region_checksum(mc) &&
|
||||
(_rw_base = mapinfo->map_region(rw)) != NULL &&
|
||||
mapinfo->verify_region_checksum(rw) &&
|
||||
(_ro_base = mapinfo->map_region(ro)) != NULL &&
|
||||
mapinfo->verify_region_checksum(ro) &&
|
||||
(_md_base = mapinfo->map_region(md)) != NULL &&
|
||||
mapinfo->verify_region_checksum(md) &&
|
||||
(_od_base = mapinfo->map_region(od)) != NULL &&
|
||||
mapinfo->verify_region_checksum(od) &&
|
||||
if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
|
||||
(rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
|
||||
(ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
|
||||
(md_base = mapinfo->map_region(md, &md_top)) != NULL &&
|
||||
(od_base = mapinfo->map_region(od, &od_top)) != NULL &&
|
||||
(image_alignment == (size_t)os::vm_allocation_granularity()) &&
|
||||
mapinfo->validate_classpath_entry_table()) {
|
||||
// Success (no need to do anything)
|
||||
// Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
|
||||
// fast checking in MetaspaceShared::is_in_shared_metaspace() and
|
||||
// MetaspaceObj::is_shared().
|
||||
//
|
||||
// We require that mc->rw->ro->md->od to be laid out consecutively, with no
|
||||
// gaps between them. That way, we can ensure that the OS won't be able to
|
||||
// allocate any new memory spaces inside _shared_metaspace_{base,top}, which
|
||||
// would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace().
|
||||
assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
|
||||
assert(od_top > ro_top && od_top > rw_top && od_top > md_top && od_top > mc_top , "must be");
|
||||
assert(mc_top == rw_base, "must be");
|
||||
assert(rw_top == ro_base, "must be");
|
||||
assert(ro_top == md_base, "must be");
|
||||
assert(md_top == od_base, "must be");
|
||||
|
||||
MetaspaceObj::_shared_metaspace_base = (void*)mc_base;
|
||||
MetaspaceObj::_shared_metaspace_top = (void*)od_top;
|
||||
return true;
|
||||
} else {
|
||||
// If there was a failure in mapping any of the spaces, unmap the ones
|
||||
// that succeeded
|
||||
if (_ro_base != NULL) mapinfo->unmap_region(ro);
|
||||
if (_rw_base != NULL) mapinfo->unmap_region(rw);
|
||||
if (_mc_base != NULL) mapinfo->unmap_region(mc);
|
||||
if (_md_base != NULL) mapinfo->unmap_region(md);
|
||||
if (_od_base != NULL) mapinfo->unmap_region(od);
|
||||
if (ro_base != NULL) mapinfo->unmap_region(ro);
|
||||
if (rw_base != NULL) mapinfo->unmap_region(rw);
|
||||
if (mc_base != NULL) mapinfo->unmap_region(mc);
|
||||
if (md_base != NULL) mapinfo->unmap_region(md);
|
||||
if (od_base != NULL) mapinfo->unmap_region(od);
|
||||
#ifndef _WINDOWS
|
||||
// Release the entire mapped region
|
||||
shared_rs.release();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -164,8 +164,13 @@ class MetaspaceShared : AllStatic {
|
||||
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
|
||||
static void initialize_shared_spaces() NOT_CDS_RETURN;
|
||||
|
||||
// Return true if given address is in the mapped shared space.
|
||||
static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
|
||||
// Return true if given address is in the shared metaspace regions (i.e., excluding any
|
||||
// mapped shared heap regions.)
|
||||
static bool is_in_shared_metaspace(const void* p) {
|
||||
// If no shared metaspace regions are mapped, MetaspceObj::_shared_metaspace_{base,top} will
|
||||
// both be NULL and all values of p will be rejected quickly.
|
||||
return (p < MetaspaceObj::_shared_metaspace_top && p >= MetaspaceObj::_shared_metaspace_base);
|
||||
}
|
||||
|
||||
// Return true if given address is in the shared region corresponding to the idx
|
||||
static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
|
||||
|
@ -111,6 +111,7 @@ oop Universe::_main_thread_group = NULL;
|
||||
oop Universe::_system_thread_group = NULL;
|
||||
objArrayOop Universe::_the_empty_class_klass_array = NULL;
|
||||
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
|
||||
oop Universe::_the_null_sentinel = NULL;
|
||||
oop Universe::_the_null_string = NULL;
|
||||
oop Universe::_the_min_jint_string = NULL;
|
||||
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
|
||||
@ -195,6 +196,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
|
||||
assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
|
||||
|
||||
f->do_oop((oop*)&_the_empty_class_klass_array);
|
||||
f->do_oop((oop*)&_the_null_sentinel);
|
||||
f->do_oop((oop*)&_the_null_string);
|
||||
f->do_oop((oop*)&_the_min_jint_string);
|
||||
f->do_oop((oop*)&_out_of_memory_error_java_heap);
|
||||
@ -381,6 +383,11 @@ void Universe::genesis(TRAPS) {
|
||||
initialize_basic_type_klass(longArrayKlassObj(), CHECK);
|
||||
} // end of core bootstrapping
|
||||
|
||||
{
|
||||
Handle tns = java_lang_String::create_from_str("<null_sentinel>", CHECK);
|
||||
_the_null_sentinel = tns();
|
||||
}
|
||||
|
||||
// Maybe this could be lifted up now that object array can be initialized
|
||||
// during the bootstrapping.
|
||||
|
||||
|
@ -141,6 +141,7 @@ class Universe: AllStatic {
|
||||
static oop _system_thread_group; // Reference to the system thread group object
|
||||
|
||||
static objArrayOop _the_empty_class_klass_array; // Canonicalized obj array of type java.lang.Class
|
||||
static oop _the_null_sentinel; // A unique object pointer unused except as a sentinel for null.
|
||||
static oop _the_null_string; // A cache of "null" as a Java string
|
||||
static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string
|
||||
static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
|
||||
@ -322,6 +323,9 @@ class Universe: AllStatic {
|
||||
|
||||
static Method* do_stack_walk_method() { return _do_stack_walk_cache->get_method(); }
|
||||
|
||||
static oop the_null_sentinel() { return _the_null_sentinel; }
|
||||
static address the_null_sentinel_addr() { return (address) &_the_null_sentinel; }
|
||||
|
||||
// Function to initialize these
|
||||
static void initialize_known_methods(TRAPS);
|
||||
|
||||
|
@ -200,12 +200,14 @@ const DecoratorSet IN_HEAP = UCONST64(1) << 18;
|
||||
const DecoratorSet IN_HEAP_ARRAY = UCONST64(1) << 19;
|
||||
const DecoratorSet IN_ROOT = UCONST64(1) << 20;
|
||||
const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21;
|
||||
const DecoratorSet IN_ARCHIVE_ROOT = UCONST64(1) << 22;
|
||||
const DecoratorSet IN_DECORATOR_MASK = IN_HEAP | IN_HEAP_ARRAY |
|
||||
IN_ROOT | IN_CONCURRENT_ROOT;
|
||||
IN_ROOT | IN_CONCURRENT_ROOT |
|
||||
IN_ARCHIVE_ROOT;
|
||||
|
||||
// == Value Decorators ==
|
||||
// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
|
||||
const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 22;
|
||||
const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 23;
|
||||
const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
|
||||
|
||||
// == Arraycopy Decorators ==
|
||||
|
@ -788,7 +788,9 @@ namespace AccessInternal {
|
||||
((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
|
||||
static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
|
||||
((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
|
||||
static const DecoratorSet value = conc_root_is_root | BT_BUILDTIME_DECORATORS;
|
||||
static const DecoratorSet archive_root_is_root = conc_root_is_root |
|
||||
((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
|
||||
static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
|
||||
};
|
||||
|
||||
// Step 2: Reduce types.
|
||||
@ -1082,7 +1084,8 @@ void Access<decorators>::verify_decorators() {
|
||||
(location_decorators ^ IN_ROOT) == 0 ||
|
||||
(location_decorators ^ IN_HEAP) == 0 ||
|
||||
(location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
|
||||
(location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0
|
||||
(location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
|
||||
(location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -172,18 +172,3 @@ namespace AccessInternal {
|
||||
Copy::conjoint_jlongs_atomic(src, dst, length);
|
||||
}
|
||||
}
|
||||
|
||||
template void AccessInternal::arraycopy_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint<jint>(jint* src, jint* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
|
||||
|
||||
template void AccessInternal::arraycopy_arrayof_conjoint<jbyte>(jbyte* src, jbyte* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_arrayof_conjoint<jshort>(jshort* src, jshort* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_arrayof_conjoint<jint>(jint* src, jint* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_arrayof_conjoint<jlong>(jlong* src, jlong* dst, size_t length);
|
||||
|
||||
template void AccessInternal::arraycopy_conjoint_atomic<jbyte>(jbyte* src, jbyte* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint_atomic<jshort>(jshort* src, jshort* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint_atomic<jint>(jint* src, jint* dst, size_t length);
|
||||
template void AccessInternal::arraycopy_conjoint_atomic<jlong>(jlong* src, jlong* dst, size_t length);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -49,9 +49,6 @@
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/vframe.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
|
||||
Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
|
||||
@ -333,13 +330,8 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
|
||||
if (MetaspaceShared::open_archive_heap_region_mapped() &&
|
||||
_cache->archived_references() != NULL) {
|
||||
oop archived = _cache->archived_references();
|
||||
// Make sure GC knows the cached object is now live. This is necessary after
|
||||
// initial GC marking and during concurrent marking as strong roots are only
|
||||
// scanned during initial marking (at the start of the GC marking).
|
||||
assert(UseG1GC, "Requires G1 GC");
|
||||
G1SATBCardTableModRefBS::enqueue(archived);
|
||||
// Create handle for the archived resolved reference array object
|
||||
Handle refs_handle(THREAD, (oop)archived);
|
||||
Handle refs_handle(THREAD, archived);
|
||||
set_resolved_references(loader_data->add_handle(refs_handle));
|
||||
} else
|
||||
#endif
|
||||
@ -615,7 +607,6 @@ Symbol* ConstantPool::impl_signature_ref_at(int which, bool uncached) {
|
||||
return symbol_at(signature_index);
|
||||
}
|
||||
|
||||
|
||||
int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) {
|
||||
int i = which;
|
||||
if (!uncached && cache() != NULL) {
|
||||
@ -629,14 +620,18 @@ int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) {
|
||||
// change byte-ordering and go via cache
|
||||
i = remap_instruction_operand_from_cache(which);
|
||||
} else {
|
||||
if (tag_at(which).is_invoke_dynamic()) {
|
||||
if (tag_at(which).is_invoke_dynamic() ||
|
||||
tag_at(which).is_dynamic_constant() ||
|
||||
tag_at(which).is_dynamic_constant_in_error()) {
|
||||
int pool_index = invoke_dynamic_name_and_type_ref_index_at(which);
|
||||
assert(tag_at(pool_index).is_name_and_type(), "");
|
||||
return pool_index;
|
||||
}
|
||||
}
|
||||
assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
|
||||
assert(!tag_at(i).is_invoke_dynamic(), "Must be handled above");
|
||||
assert(!tag_at(i).is_invoke_dynamic() &&
|
||||
!tag_at(i).is_dynamic_constant() &&
|
||||
!tag_at(i).is_dynamic_constant_in_error(), "Must be handled above");
|
||||
jint ref_index = *int_at_addr(i);
|
||||
return extract_high_short_from_int(ref_index);
|
||||
}
|
||||
@ -680,16 +675,12 @@ int ConstantPool::remap_instruction_operand_from_cache(int operand) {
|
||||
|
||||
|
||||
void ConstantPool::verify_constant_pool_resolve(const constantPoolHandle& this_cp, Klass* k, TRAPS) {
|
||||
if (k->is_instance_klass() || k->is_objArray_klass()) {
|
||||
InstanceKlass* holder = this_cp->pool_holder();
|
||||
Klass* elem = k->is_instance_klass() ? k : ObjArrayKlass::cast(k)->bottom_klass();
|
||||
|
||||
// The element type could be a typeArray - we only need the access check if it is
|
||||
// an reference to another class
|
||||
if (elem->is_instance_klass()) {
|
||||
LinkResolver::check_klass_accessability(holder, elem, CHECK);
|
||||
}
|
||||
if (!(k->is_instance_klass() || k->is_objArray_klass())) {
|
||||
return; // short cut, typeArray klass is always accessible
|
||||
}
|
||||
Klass* holder = this_cp->pool_holder();
|
||||
bool fold_type_to_class = true;
|
||||
LinkResolver::check_klass_accessability(holder, k, fold_type_to_class, CHECK);
|
||||
}
|
||||
|
||||
|
||||
@ -777,8 +768,8 @@ void ConstantPool::throw_resolution_error(const constantPoolHandle& this_cp, int
|
||||
THROW_MSG(error, message->as_C_string());
|
||||
}
|
||||
|
||||
// If resolution for Class, MethodHandle or MethodType fails, save the exception
|
||||
// in the resolution error table, so that the same exception is thrown again.
|
||||
// If resolution for Class, Dynamic constant, MethodHandle or MethodType fails, save the
|
||||
// exception in the resolution error table, so that the same exception is thrown again.
|
||||
void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, int which,
|
||||
constantTag tag, TRAPS) {
|
||||
Symbol* error = PENDING_EXCEPTION->klass()->name();
|
||||
@ -814,16 +805,31 @@ void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, i
|
||||
}
|
||||
}
|
||||
|
||||
BasicType ConstantPool::basic_type_for_constant_at(int which) {
|
||||
constantTag tag = tag_at(which);
|
||||
if (tag.is_dynamic_constant() ||
|
||||
tag.is_dynamic_constant_in_error()) {
|
||||
// have to look at the signature for this one
|
||||
Symbol* constant_type = uncached_signature_ref_at(which);
|
||||
return FieldType::basic_type(constant_type);
|
||||
}
|
||||
return tag.basic_type();
|
||||
}
|
||||
|
||||
// Called to resolve constants in the constant pool and return an oop.
|
||||
// Some constant pool entries cache their resolved oop. This is also
|
||||
// called to create oops from constants to use in arguments for invokedynamic
|
||||
oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS) {
|
||||
oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
|
||||
int index, int cache_index,
|
||||
bool* status_return, TRAPS) {
|
||||
oop result_oop = NULL;
|
||||
Handle throw_exception;
|
||||
|
||||
if (cache_index == _possible_index_sentinel) {
|
||||
// It is possible that this constant is one which is cached in the objects.
|
||||
// We'll do a linear search. This should be OK because this usage is rare.
|
||||
// FIXME: If bootstrap specifiers stress this code, consider putting in
|
||||
// a reverse index. Binary search over a short array should do it.
|
||||
assert(index > 0, "valid index");
|
||||
cache_index = this_cp->cp_to_object_index(index);
|
||||
}
|
||||
@ -833,6 +839,12 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, in
|
||||
if (cache_index >= 0) {
|
||||
result_oop = this_cp->resolved_references()->obj_at(cache_index);
|
||||
if (result_oop != NULL) {
|
||||
if (result_oop == Universe::the_null_sentinel()) {
|
||||
DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
|
||||
assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
|
||||
result_oop = NULL;
|
||||
}
|
||||
if (status_return != NULL) (*status_return) = true;
|
||||
return result_oop;
|
||||
// That was easy...
|
||||
}
|
||||
@ -843,6 +855,35 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, in
|
||||
|
||||
constantTag tag = this_cp->tag_at(index);
|
||||
|
||||
if (status_return != NULL) {
|
||||
// don't trigger resolution if the constant might need it
|
||||
switch (tag.value()) {
|
||||
case JVM_CONSTANT_Class:
|
||||
{
|
||||
CPKlassSlot kslot = this_cp->klass_slot_at(index);
|
||||
int resolved_klass_index = kslot.resolved_klass_index();
|
||||
if (this_cp->resolved_klasses()->at(resolved_klass_index) == NULL) {
|
||||
(*status_return) = false;
|
||||
return NULL;
|
||||
}
|
||||
// the klass is waiting in the CP; go get it
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_String:
|
||||
case JVM_CONSTANT_Integer:
|
||||
case JVM_CONSTANT_Float:
|
||||
case JVM_CONSTANT_Long:
|
||||
case JVM_CONSTANT_Double:
|
||||
// these guys trigger OOM at worst
|
||||
break;
|
||||
default:
|
||||
(*status_return) = false;
|
||||
return NULL;
|
||||
}
|
||||
// from now on there is either success or an OOME
|
||||
(*status_return) = true;
|
||||
}
|
||||
|
||||
switch (tag.value()) {
|
||||
|
||||
case JVM_CONSTANT_UnresolvedClass:
|
||||
@ -856,6 +897,63 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, in
|
||||
break;
|
||||
}
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
{
|
||||
Klass* current_klass = this_cp->pool_holder();
|
||||
Symbol* constant_name = this_cp->uncached_name_ref_at(index);
|
||||
Symbol* constant_type = this_cp->uncached_signature_ref_at(index);
|
||||
|
||||
// The initial step in resolving an unresolved symbolic reference to a
|
||||
// dynamically-computed constant is to resolve the symbolic reference to a
|
||||
// method handle which will be the bootstrap method for the dynamically-computed
|
||||
// constant. If resolution of the java.lang.invoke.MethodHandle for the bootstrap
|
||||
// method fails, then a MethodHandleInError is stored at the corresponding
|
||||
// bootstrap method's CP index for the CONSTANT_MethodHandle_info. No need to
|
||||
// set a DynamicConstantInError here since any subsequent use of this
|
||||
// bootstrap method will encounter the resolution of MethodHandleInError.
|
||||
oop bsm_info = this_cp->resolve_bootstrap_specifier_at(index, THREAD);
|
||||
Exceptions::wrap_dynamic_exception(CHECK_NULL);
|
||||
assert(bsm_info != NULL, "");
|
||||
// FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_Dynamic.
|
||||
Handle bootstrap_specifier = Handle(THREAD, bsm_info);
|
||||
|
||||
// Resolve the Dynamically-Computed constant to invoke the BSM in order to obtain the resulting oop.
|
||||
Handle value = SystemDictionary::link_dynamic_constant(current_klass,
|
||||
index,
|
||||
bootstrap_specifier,
|
||||
constant_name,
|
||||
constant_type,
|
||||
THREAD);
|
||||
result_oop = value();
|
||||
Exceptions::wrap_dynamic_exception(THREAD);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
// Resolution failure of the dynamically-computed constant, save_and_throw_exception
|
||||
// will check for a LinkageError and store a DynamicConstantInError.
|
||||
save_and_throw_exception(this_cp, index, tag, CHECK_NULL);
|
||||
}
|
||||
BasicType type = FieldType::basic_type(constant_type);
|
||||
if (!is_reference_type(type)) {
|
||||
// Make sure the primitive value is properly boxed.
|
||||
// This is a JDK responsibility.
|
||||
const char* fail = NULL;
|
||||
if (result_oop == NULL) {
|
||||
fail = "null result instead of box";
|
||||
} else if (!is_java_primitive(type)) {
|
||||
// FIXME: support value types via unboxing
|
||||
fail = "can only handle references and primitives";
|
||||
} else if (!java_lang_boxing_object::is_instance(result_oop, type)) {
|
||||
fail = "primitive is not properly boxed";
|
||||
}
|
||||
if (fail != NULL) {
|
||||
// Since this exception is not a LinkageError, throw exception
|
||||
// but do not save a DynamicInError resolution result.
|
||||
// See section 5.4.3 of the VM spec.
|
||||
THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), fail);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case JVM_CONSTANT_String:
|
||||
assert(cache_index != _no_index_sentinel, "should have been set");
|
||||
if (this_cp->is_pseudo_string_at(index)) {
|
||||
@ -865,6 +963,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, in
|
||||
result_oop = string_at_impl(this_cp, index, cache_index, CHECK_NULL);
|
||||
break;
|
||||
|
||||
case JVM_CONSTANT_DynamicInError:
|
||||
case JVM_CONSTANT_MethodHandleInError:
|
||||
case JVM_CONSTANT_MethodTypeInError:
|
||||
{
|
||||
@ -965,15 +1064,20 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, in
|
||||
// The important thing here is that all threads pick up the same result.
|
||||
// It doesn't matter which racing thread wins, as long as only one
|
||||
// result is used by all threads, and all future queries.
|
||||
oop old_result = this_cp->resolved_references()->atomic_compare_exchange_oop(cache_index, result_oop, NULL);
|
||||
oop new_result = (result_oop == NULL ? Universe::the_null_sentinel() : result_oop);
|
||||
oop old_result = this_cp->resolved_references()
|
||||
->atomic_compare_exchange_oop(cache_index, new_result, NULL);
|
||||
if (old_result == NULL) {
|
||||
return result_oop; // was installed
|
||||
} else {
|
||||
// Return the winning thread's result. This can be different than
|
||||
// the result here for MethodHandles.
|
||||
if (old_result == Universe::the_null_sentinel())
|
||||
old_result = NULL;
|
||||
return old_result;
|
||||
}
|
||||
} else {
|
||||
assert(result_oop != Universe::the_null_sentinel(), "");
|
||||
return result_oop;
|
||||
}
|
||||
}
|
||||
@ -987,13 +1091,14 @@ oop ConstantPool::uncached_string_at(int which, TRAPS) {
|
||||
|
||||
|
||||
oop ConstantPool::resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS) {
|
||||
assert(this_cp->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool");
|
||||
|
||||
assert((this_cp->tag_at(index).is_invoke_dynamic() ||
|
||||
this_cp->tag_at(index).is_dynamic_constant()), "Corrupted constant pool");
|
||||
Handle bsm;
|
||||
int argc;
|
||||
{
|
||||
// JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments
|
||||
// The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
|
||||
// JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&mtype], plus optional arguments
|
||||
// JVM_CONSTANT_Dynamic is an ordered pair of [bootm, name&ftype], plus optional arguments
|
||||
// In both cases, the bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
|
||||
// It is accompanied by the optional arguments.
|
||||
int bsm_index = this_cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
|
||||
oop bsm_oop = this_cp->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL);
|
||||
@ -1003,30 +1108,142 @@ oop ConstantPool::resolve_bootstrap_specifier_at_impl(const constantPoolHandle&
|
||||
|
||||
// Extract the optional static arguments.
|
||||
argc = this_cp->invoke_dynamic_argument_count_at(index);
|
||||
if (argc == 0) return bsm_oop;
|
||||
|
||||
// if there are no static arguments, return the bsm by itself:
|
||||
if (argc == 0 && UseBootstrapCallInfo < 2) return bsm_oop;
|
||||
|
||||
bsm = Handle(THREAD, bsm_oop);
|
||||
}
|
||||
|
||||
// We are going to return an ordered pair of {bsm, info}, using a 2-array.
|
||||
objArrayHandle info;
|
||||
{
|
||||
objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1+argc, CHECK_NULL);
|
||||
objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 2, CHECK_NULL);
|
||||
info = objArrayHandle(THREAD, info_oop);
|
||||
}
|
||||
|
||||
info->obj_at_put(0, bsm());
|
||||
for (int i = 0; i < argc; i++) {
|
||||
int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
|
||||
oop arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL);
|
||||
info->obj_at_put(1+i, arg_oop);
|
||||
|
||||
bool use_BSCI;
|
||||
switch (UseBootstrapCallInfo) {
|
||||
default: use_BSCI = true; break; // stress mode
|
||||
case 0: use_BSCI = false; break; // stress mode
|
||||
case 1: // normal mode
|
||||
// If we were to support an alternative mode of BSM invocation,
|
||||
// we'd convert to pull mode here if the BSM could be a candidate
|
||||
// for that alternative mode. We can't easily test for things
|
||||
// like varargs here, but we can get away with approximate testing,
|
||||
// since the JDK runtime will make up the difference either way.
|
||||
// For now, exercise the pull-mode path if the BSM is of arity 2,
|
||||
// or if there is a potential condy loop (see below).
|
||||
oop mt_oop = java_lang_invoke_MethodHandle::type(bsm());
|
||||
use_BSCI = (java_lang_invoke_MethodType::ptype_count(mt_oop) == 2);
|
||||
break;
|
||||
}
|
||||
|
||||
// Here's a reason to use BSCI even if it wasn't requested:
|
||||
// If a condy uses a condy argument, we want to avoid infinite
|
||||
// recursion (condy loops) in the C code. It's OK in Java,
|
||||
// because Java has stack overflow checking, so we punt
|
||||
// potentially cyclic cases from C to Java.
|
||||
if (!use_BSCI && this_cp->tag_at(index).is_dynamic_constant()) {
|
||||
bool found_unresolved_condy = false;
|
||||
for (int i = 0; i < argc; i++) {
|
||||
int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
|
||||
if (this_cp->tag_at(arg_index).is_dynamic_constant()) {
|
||||
// potential recursion point condy -> condy
|
||||
bool found_it = false;
|
||||
this_cp->find_cached_constant_at(arg_index, found_it, CHECK_NULL);
|
||||
if (!found_it) { found_unresolved_condy = true; break; }
|
||||
}
|
||||
}
|
||||
if (found_unresolved_condy)
|
||||
use_BSCI = true;
|
||||
}
|
||||
|
||||
const int SMALL_ARITY = 5;
|
||||
if (use_BSCI && argc <= SMALL_ARITY && UseBootstrapCallInfo <= 2) {
|
||||
// If there are only a few arguments, and none of them need linking,
|
||||
// push them, instead of asking the JDK runtime to turn around and
|
||||
// pull them, saving a JVM/JDK transition in some simple cases.
|
||||
bool all_resolved = true;
|
||||
for (int i = 0; i < argc; i++) {
|
||||
bool found_it = false;
|
||||
int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
|
||||
this_cp->find_cached_constant_at(arg_index, found_it, CHECK_NULL);
|
||||
if (!found_it) { all_resolved = false; break; }
|
||||
}
|
||||
if (all_resolved)
|
||||
use_BSCI = false;
|
||||
}
|
||||
|
||||
if (!use_BSCI) {
|
||||
// return {bsm, {arg...}}; resolution of arguments is done immediately, before JDK code is called
|
||||
objArrayOop args_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), argc, CHECK_NULL);
|
||||
info->obj_at_put(1, args_oop); // may overwrite with args[0] below
|
||||
objArrayHandle args(THREAD, args_oop);
|
||||
copy_bootstrap_arguments_at_impl(this_cp, index, 0, argc, args, 0, true, Handle(), CHECK_NULL);
|
||||
if (argc == 1) {
|
||||
// try to discard the singleton array
|
||||
oop arg_oop = args->obj_at(0);
|
||||
if (arg_oop != NULL && !arg_oop->is_array()) {
|
||||
// JVM treats arrays and nulls specially in this position,
|
||||
// but other things are just single arguments
|
||||
info->obj_at_put(1, arg_oop);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// return {bsm, {arg_count, pool_index}}; JDK code must pull the arguments as needed
|
||||
typeArrayOop ints_oop = oopFactory::new_typeArray(T_INT, 2, CHECK_NULL);
|
||||
ints_oop->int_at_put(0, argc);
|
||||
ints_oop->int_at_put(1, index);
|
||||
info->obj_at_put(1, ints_oop);
|
||||
}
|
||||
return info();
|
||||
}
|
||||
|
||||
void ConstantPool::copy_bootstrap_arguments_at_impl(const constantPoolHandle& this_cp, int index,
|
||||
int start_arg, int end_arg,
|
||||
objArrayHandle info, int pos,
|
||||
bool must_resolve, Handle if_not_available,
|
||||
TRAPS) {
|
||||
int argc;
|
||||
int limit = pos + end_arg - start_arg;
|
||||
// checks: index in range [0..this_cp->length),
|
||||
// tag at index, start..end in range [0..argc],
|
||||
// info array non-null, pos..limit in [0..info.length]
|
||||
if ((0 >= index || index >= this_cp->length()) ||
|
||||
!(this_cp->tag_at(index).is_invoke_dynamic() ||
|
||||
this_cp->tag_at(index).is_dynamic_constant()) ||
|
||||
(0 > start_arg || start_arg > end_arg) ||
|
||||
(end_arg > (argc = this_cp->invoke_dynamic_argument_count_at(index))) ||
|
||||
(0 > pos || pos > limit) ||
|
||||
(info.is_null() || limit > info->length())) {
|
||||
// An index or something else went wrong; throw an error.
|
||||
// Since this is an internal API, we don't expect this,
|
||||
// so we don't bother to craft a nice message.
|
||||
THROW_MSG(vmSymbols::java_lang_LinkageError(), "bad BSM argument access");
|
||||
}
|
||||
// now we can loop safely
|
||||
int info_i = pos;
|
||||
for (int i = start_arg; i < end_arg; i++) {
|
||||
int arg_index = this_cp->invoke_dynamic_argument_index_at(index, i);
|
||||
oop arg_oop;
|
||||
if (must_resolve) {
|
||||
arg_oop = this_cp->resolve_possibly_cached_constant_at(arg_index, CHECK);
|
||||
} else {
|
||||
bool found_it = false;
|
||||
arg_oop = this_cp->find_cached_constant_at(arg_index, found_it, CHECK);
|
||||
if (!found_it) arg_oop = if_not_available();
|
||||
}
|
||||
info->obj_at_put(info_i++, arg_oop);
|
||||
}
|
||||
}
|
||||
|
||||
oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
|
||||
// If the string has already been interned, this entry will be non-null
|
||||
oop str = this_cp->resolved_references()->obj_at(obj_index);
|
||||
assert(str != Universe::the_null_sentinel(), "");
|
||||
if (str != NULL) return str;
|
||||
Symbol* sym = this_cp->unresolved_string_at(which);
|
||||
str = StringTable::intern(sym, CHECK_(NULL));
|
||||
@ -1207,6 +1424,18 @@ bool ConstantPool::compare_entry_to(int index1, const constantPoolHandle& cp2,
|
||||
}
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
{
|
||||
int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
|
||||
int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
|
||||
int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
|
||||
int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
|
||||
// separate statements and variables because CHECK_false is used
|
||||
bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
|
||||
bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
|
||||
return (match_entry && match_operand);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
{
|
||||
int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
|
||||
@ -1533,6 +1762,15 @@ void ConstantPool::copy_entry_to(const constantPoolHandle& from_cp, int from_i,
|
||||
to_cp->method_handle_index_at_put(to_i, k1, k2);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
case JVM_CONSTANT_DynamicInError:
|
||||
{
|
||||
int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
|
||||
int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
|
||||
k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
|
||||
to_cp->dynamic_constant_at_put(to_i, k1, k2);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
{
|
||||
int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
|
||||
@ -1794,6 +2032,8 @@ jint ConstantPool::cpool_entry_size(jint idx) {
|
||||
case JVM_CONSTANT_NameAndType:
|
||||
return 5;
|
||||
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
case JVM_CONSTANT_DynamicInError:
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
// u1 tag, u2 bsm, u2 nt
|
||||
return 5;
|
||||
@ -1979,6 +2219,17 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
|
||||
DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_Dynamic:
|
||||
case JVM_CONSTANT_DynamicInError: {
|
||||
*bytes = tag;
|
||||
idx1 = extract_low_short_from_int(*int_at_addr(idx));
|
||||
idx2 = extract_high_short_from_int(*int_at_addr(idx));
|
||||
assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4");
|
||||
Bytes::put_Java_u2((address) (bytes+1), idx1);
|
||||
Bytes::put_Java_u2((address) (bytes+3), idx2);
|
||||
DBG(printf("JVM_CONSTANT_Dynamic: %hd %hd", idx1, idx2));
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_InvokeDynamic: {
|
||||
*bytes = tag;
|
||||
idx1 = extract_low_short_from_int(*int_at_addr(idx));
|
||||
@ -2184,6 +2435,21 @@ void ConstantPool::print_entry_on(const int index, outputStream* st) {
|
||||
case JVM_CONSTANT_MethodTypeInError :
|
||||
st->print("signature_index=%d", method_type_index_at(index));
|
||||
break;
|
||||
case JVM_CONSTANT_Dynamic :
|
||||
case JVM_CONSTANT_DynamicInError :
|
||||
{
|
||||
st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
|
||||
st->print(" type_index=%d", invoke_dynamic_name_and_type_ref_index_at(index));
|
||||
int argc = invoke_dynamic_argument_count_at(index);
|
||||
if (argc > 0) {
|
||||
for (int arg_i = 0; arg_i < argc; arg_i++) {
|
||||
int arg = invoke_dynamic_argument_index_at(index, arg_i);
|
||||
st->print((arg_i == 0 ? " arguments={%d" : ", %d"), arg);
|
||||
}
|
||||
st->print("}");
|
||||
}
|
||||
}
|
||||
break;
|
||||
case JVM_CONSTANT_InvokeDynamic :
|
||||
{
|
||||
st->print("bootstrap_method_index=%d", invoke_dynamic_bootstrap_method_ref_index_at(index));
|
||||
|
@ -113,9 +113,10 @@ class ConstantPool : public Metadata {
|
||||
Array<Klass*>* _resolved_klasses;
|
||||
|
||||
enum {
|
||||
_has_preresolution = 1, // Flags
|
||||
_on_stack = 2,
|
||||
_is_shared = 4
|
||||
_has_preresolution = 1, // Flags
|
||||
_on_stack = 2,
|
||||
_is_shared = 4,
|
||||
_has_dynamic_constant = 8
|
||||
};
|
||||
|
||||
int _flags; // old fashioned bit twiddling
|
||||
@ -207,6 +208,9 @@ class ConstantPool : public Metadata {
|
||||
// Faster than MetaspaceObj::is_shared() - used by set_on_stack()
|
||||
bool is_shared() const { return (_flags & _is_shared) != 0; }
|
||||
|
||||
bool has_dynamic_constant() const { return (_flags & _has_dynamic_constant) != 0; }
|
||||
void set_has_dynamic_constant() { _flags |= _has_dynamic_constant; }
|
||||
|
||||
// Klass holding pool
|
||||
InstanceKlass* pool_holder() const { return _pool_holder; }
|
||||
void set_pool_holder(InstanceKlass* k) { _pool_holder = k; }
|
||||
@ -297,6 +301,11 @@ class ConstantPool : public Metadata {
|
||||
*int_at_addr(which) = ref_index;
|
||||
}
|
||||
|
||||
void dynamic_constant_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
|
||||
tag_at_put(which, JVM_CONSTANT_Dynamic);
|
||||
*int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
|
||||
}
|
||||
|
||||
void invoke_dynamic_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
|
||||
tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
|
||||
*int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
|
||||
@ -554,11 +563,15 @@ class ConstantPool : public Metadata {
|
||||
}
|
||||
|
||||
int invoke_dynamic_name_and_type_ref_index_at(int which) {
|
||||
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
|
||||
assert(tag_at(which).is_invoke_dynamic() ||
|
||||
tag_at(which).is_dynamic_constant() ||
|
||||
tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
|
||||
return extract_high_short_from_int(*int_at_addr(which));
|
||||
}
|
||||
int invoke_dynamic_bootstrap_specifier_index(int which) {
|
||||
assert(tag_at(which).value() == JVM_CONSTANT_InvokeDynamic, "Corrupted constant pool");
|
||||
assert(tag_at(which).is_invoke_dynamic() ||
|
||||
tag_at(which).is_dynamic_constant() ||
|
||||
tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
|
||||
return extract_low_short_from_int(*int_at_addr(which));
|
||||
}
|
||||
int invoke_dynamic_operand_base(int which) {
|
||||
@ -608,7 +621,7 @@ class ConstantPool : public Metadata {
|
||||
}
|
||||
#endif //ASSERT
|
||||
|
||||
// layout of InvokeDynamic bootstrap method specifier (in second part of operands array):
|
||||
// layout of InvokeDynamic and Dynamic bootstrap method specifier (in second part of operands array):
|
||||
enum {
|
||||
_indy_bsm_offset = 0, // CONSTANT_MethodHandle bsm
|
||||
_indy_argc_offset = 1, // u2 argc
|
||||
@ -654,14 +667,17 @@ class ConstantPool : public Metadata {
|
||||
// Shrink the operands array to a smaller array with new_len length
|
||||
void shrink_operands(int new_len, TRAPS);
|
||||
|
||||
|
||||
int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
|
||||
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
|
||||
assert(tag_at(which).is_invoke_dynamic() ||
|
||||
tag_at(which).is_dynamic_constant() ||
|
||||
tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
|
||||
int op_base = invoke_dynamic_operand_base(which);
|
||||
return operands()->at(op_base + _indy_bsm_offset);
|
||||
}
|
||||
int invoke_dynamic_argument_count_at(int which) {
|
||||
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
|
||||
assert(tag_at(which).is_invoke_dynamic() ||
|
||||
tag_at(which).is_dynamic_constant() ||
|
||||
tag_at(which).is_dynamic_constant_in_error(), "Corrupted constant pool");
|
||||
int op_base = invoke_dynamic_operand_base(which);
|
||||
int argc = operands()->at(op_base + _indy_argc_offset);
|
||||
DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
|
||||
@ -731,20 +747,27 @@ class ConstantPool : public Metadata {
|
||||
enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 };
|
||||
public:
|
||||
|
||||
BasicType basic_type_for_constant_at(int which);
|
||||
|
||||
// Resolve late bound constants.
|
||||
oop resolve_constant_at(int index, TRAPS) {
|
||||
constantPoolHandle h_this(THREAD, this);
|
||||
return resolve_constant_at_impl(h_this, index, _no_index_sentinel, THREAD);
|
||||
return resolve_constant_at_impl(h_this, index, _no_index_sentinel, NULL, THREAD);
|
||||
}
|
||||
|
||||
oop resolve_cached_constant_at(int cache_index, TRAPS) {
|
||||
constantPoolHandle h_this(THREAD, this);
|
||||
return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, THREAD);
|
||||
return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, NULL, THREAD);
|
||||
}
|
||||
|
||||
oop resolve_possibly_cached_constant_at(int pool_index, TRAPS) {
|
||||
constantPoolHandle h_this(THREAD, this);
|
||||
return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, THREAD);
|
||||
return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, NULL, THREAD);
|
||||
}
|
||||
|
||||
oop find_cached_constant_at(int pool_index, bool& found_it, TRAPS) {
|
||||
constantPoolHandle h_this(THREAD, this);
|
||||
return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, &found_it, THREAD);
|
||||
}
|
||||
|
||||
oop resolve_bootstrap_specifier_at(int index, TRAPS) {
|
||||
@ -752,6 +775,15 @@ class ConstantPool : public Metadata {
|
||||
return resolve_bootstrap_specifier_at_impl(h_this, index, THREAD);
|
||||
}
|
||||
|
||||
void copy_bootstrap_arguments_at(int index,
|
||||
int start_arg, int end_arg,
|
||||
objArrayHandle info, int pos,
|
||||
bool must_resolve, Handle if_not_available, TRAPS) {
|
||||
constantPoolHandle h_this(THREAD, this);
|
||||
copy_bootstrap_arguments_at_impl(h_this, index, start_arg, end_arg,
|
||||
info, pos, must_resolve, if_not_available, THREAD);
|
||||
}
|
||||
|
||||
// Klass name matches name at offset
|
||||
bool klass_name_at_matches(const InstanceKlass* k, int which);
|
||||
|
||||
@ -833,6 +865,7 @@ class ConstantPool : public Metadata {
|
||||
|
||||
Symbol* impl_name_ref_at(int which, bool uncached);
|
||||
Symbol* impl_signature_ref_at(int which, bool uncached);
|
||||
|
||||
int impl_klass_ref_index_at(int which, bool uncached);
|
||||
int impl_name_and_type_ref_index_at(int which, bool uncached);
|
||||
constantTag impl_tag_ref_at(int which, bool uncached);
|
||||
@ -862,8 +895,13 @@ class ConstantPool : public Metadata {
|
||||
// Resolve string constants (to prevent allocation during compilation)
|
||||
static void resolve_string_constants_impl(const constantPoolHandle& this_cp, TRAPS);
|
||||
|
||||
static oop resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, TRAPS);
|
||||
static oop resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index,
|
||||
bool* status_return, TRAPS);
|
||||
static oop resolve_bootstrap_specifier_at_impl(const constantPoolHandle& this_cp, int index, TRAPS);
|
||||
static void copy_bootstrap_arguments_at_impl(const constantPoolHandle& this_cp, int index,
|
||||
int start_arg, int end_arg,
|
||||
objArrayHandle info, int pos,
|
||||
bool must_resolve, Handle if_not_available, TRAPS);
|
||||
|
||||
// Exception handling
|
||||
static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception);
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.inline.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/cpCache.hpp"
|
||||
#include "oops/objArrayOop.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
@ -741,13 +742,16 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
oop ConstantPoolCache::archived_references() {
|
||||
assert(UseSharedSpaces, "UseSharedSpaces expected.");
|
||||
return oopDesc::decode_heap_oop(_archived_references);
|
||||
// Loading an archive root forces the oop to become strongly reachable.
|
||||
// For example, if it is loaded during concurrent marking in a SATB
|
||||
// collector, it will be enqueued to the SATB queue, effectively
|
||||
// shading the previously white object gray.
|
||||
return RootAccess<IN_ARCHIVE_ROOT>::oop_load(&_archived_references);
|
||||
}
|
||||
|
||||
void ConstantPoolCache::set_archived_references(oop o) {
|
||||
assert(DumpSharedSpaces, "called only during runtime");
|
||||
_archived_references = oopDesc::encode_heap_oop(o);
|
||||
RootAccess<IN_ARCHIVE_ROOT>::oop_store(&_archived_references, o);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1878,13 +1878,15 @@ void GenerateOopMap::do_ldc(int bci) {
|
||||
ConstantPool* cp = method()->constants();
|
||||
constantTag tag = cp->tag_at(ldc.pool_index()); // idx is index in resolved_references
|
||||
BasicType bt = ldc.result_type();
|
||||
#ifdef ASSERT
|
||||
BasicType tag_bt = tag.is_dynamic_constant() ? bt : tag.basic_type();
|
||||
assert(bt == tag_bt, "same result");
|
||||
#endif
|
||||
CellTypeState cts;
|
||||
if (tag.basic_type() == T_OBJECT) {
|
||||
if (is_reference_type(bt)) { // could be T_ARRAY with condy
|
||||
assert(!tag.is_string_index() && !tag.is_klass_index(), "Unexpected index tag");
|
||||
assert(bt == T_OBJECT, "Guard is incorrect");
|
||||
cts = CellTypeState::make_line_ref(bci);
|
||||
} else {
|
||||
assert(bt != T_OBJECT, "Guard is incorrect");
|
||||
cts = valCTS;
|
||||
}
|
||||
ppush1(cts);
|
||||
|
@ -2229,7 +2229,7 @@ void InstanceKlass::release_C_heap_structures() {
|
||||
}
|
||||
|
||||
// deallocate the cached class file
|
||||
if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_space(_cached_class_file)) {
|
||||
if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
|
||||
os::free(_cached_class_file);
|
||||
_cached_class_file = NULL;
|
||||
}
|
||||
@ -3732,7 +3732,7 @@ Method* InstanceKlass::method_with_orig_idnum(int idnum, int version) {
|
||||
|
||||
#if INCLUDE_JVMTI
|
||||
JvmtiCachedClassFileData* InstanceKlass::get_cached_class_file() {
|
||||
if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
|
||||
if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
|
||||
// Ignore the archived class stream data
|
||||
return NULL;
|
||||
} else {
|
||||
@ -3754,7 +3754,7 @@ JvmtiCachedClassFileData* InstanceKlass::get_archived_class_data() {
|
||||
return _cached_class_file;
|
||||
} else {
|
||||
assert(this->is_shared(), "class should be shared");
|
||||
if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
|
||||
if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
|
||||
return _cached_class_file;
|
||||
} else {
|
||||
return NULL;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1040,7 +1040,7 @@ void itableMethodEntry::initialize(Method* m) {
|
||||
if (m == NULL) return;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (MetaspaceShared::is_in_shared_space((void*)&_method) &&
|
||||
if (MetaspaceShared::is_in_shared_metaspace((void*)&_method) &&
|
||||
!MetaspaceShared::remapped_readwrite()) {
|
||||
// At runtime initialize_itable is rerun as part of link_class_impl()
|
||||
// for a shared class loaded by the non-boot loader.
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user