Merge
commit 925d275a62

Changed files (as listed in the capture; filenames were run together in the original tree):
.hgtags
CONTRIBUTING.md (new)
README (deleted)
README.md (new)
make
src/hotspot/cpu/aarch64: aarch64.ad, assembler_aarch64.cpp, assembler_aarch64.hpp, c2_globals_aarch64.hpp, macroAssembler_aarch64.cpp, macroAssembler_aarch64.hpp, vm_version_aarch64.cpp
src/hotspot/cpu/arm
src/hotspot/cpu/ppc
src/hotspot/cpu/x86
src/hotspot/os
src/hotspot/share/aot
src/hotspot/share/c1
src/hotspot/share/classfile: classFileParser.cpp, classLoader.hpp, classLoader.inline.hpp, compactHashtable.cpp, defaultMethods.cpp, dictionary.cpp, klassFactory.cpp, packageEntry.cpp, stringTable.cpp, stringTable.hpp, symbolTable.cpp, symbolTable.hpp, systemDictionary.cpp, verificationType.cpp, verifier.cpp
src/hotspot/share/code
src/hotspot/share/compiler
src/hotspot/share/gc: parallel, shenandoah/c2, z
src/hotspot/share/jfr
src/hotspot/share/jvmci: jvmci.cpp, jvmciCodeInstaller.cpp, jvmciCodeInstaller.hpp, jvmciCompiler.cpp, jvmciJavaClasses.hpp, jvmci_globals.cpp, vmStructs_jvmci.cpp
src/hotspot/share/memory: archiveBuilder.cpp, archiveBuilder.hpp, archiveUtils.cpp, archiveUtils.hpp, arena.hpp, dynamicArchive.cpp, heapShared.cpp, heapShared.hpp, metaspace.cpp, metaspaceShared.cpp, metaspaceShared.hpp, resourceArea.cpp, resourceArea.hpp, resourceArea.inline.hpp
src/hotspot/share/oops

.hgtags (1 addition):
@@ -660,3 +660,4 @@ b01985b4f88f554f97901e53e1ba314681dd9c19 jdk-16+10
 e3f940bd3c8fcdf4ca704c6eb1ac745d155859d5 jdk-15+36
 5c18d696c7ce724ca36df13933aa53f50e12b9e0 jdk-16+11
 fc8e62b399bd93d06e8d13dc3b384c450e853dcd jdk-16+12
+fd07cdb26fc70243ef23d688b545514f4ddf1c2b jdk-16+13
CONTRIBUTING.md (new file, 3 lines):
@@ -0,0 +1,3 @@
+# Contributing to the JDK
+
+Please see <https://openjdk.java.net/contribute/> for how to contribute.
README (deleted file, 12 lines):
@@ -1,12 +0,0 @@
-
-Welcome to the JDK!
-===================
-
-For build instructions, please see either of these files:
-
-* doc/building.html (html version)
-* doc/building.md (markdown version)
-
-See https://openjdk.java.net/ for more information about
-the OpenJDK Community and the JDK.
-
README.md (new file, 11 lines):
@@ -0,0 +1,11 @@
+# Welcome to the JDK!
+
+For build instructions please see the
+[online documentation](https://openjdk.java.net/groups/build/doc/building.html),
+or either of these files:
+
+- [doc/building.html](doc/building.html) (html version)
+- [doc/building.md](doc/building.md) (markdown version)
+
+See <https://openjdk.java.net/> for more information about
+the OpenJDK Community and the JDK.
make (build file; exact file name collapsed in the capture):
@@ -298,6 +298,8 @@ define SetupApiDocsGenerationBody
   # Create a string like "-Xdoclint:all,-syntax,-html,..."
   $1_OPTIONS += -Xdoclint:all,$$(call CommaList, $$(addprefix -, \
       $$(JAVADOC_DISABLED_DOCLINT)))
+  # Ignore the doclint warnings in the W3C DOM package
+  $1_OPTIONS += -Xdoclint/package:-org.w3c.*
 
   $1_DOC_TITLE := $$($1_LONG_NAME)<br>Version $$(VERSION_SPECIFICATION) API \
       Specification
src/hotspot/cpu/aarch64/aarch64.ad:
@@ -3159,7 +3159,7 @@ encode %{
     if (con < (address)(uintptr_t)os::vm_page_size()) {
       __ mov(dst_reg, con);
     } else {
-      uintptr_t offset;
+      uint64_t offset;
       __ adrp(dst_reg, con, offset);
       __ add(dst_reg, dst_reg, offset);
     }
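Aside (not part of the diff): the uintptr_t-to-uint64_t changes in this section pin adrp()'s out-parameter to one fixed-width type on every platform. As a rough sketch of what the emitted ADRP+ADD pair computes — illustrative C++ only, not JDK code, assuming the usual 4 KiB page granule:

  #include <cstdint>

  // ADRP materializes the 4 KiB page base of the target address;
  // the following ADD applies the low 12 bits. 'offset' plays the
  // role of the adrp() out-parameter in the hunk above.
  uint64_t adrp_add(uint64_t target) {
    uint64_t page_base = target & ~UINT64_C(0xfff); // what ADRP yields
    uint64_t offset    = target &  UINT64_C(0xfff); // what ADD adds back
    return page_base + offset;                      // == target
  }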
src/hotspot/cpu/aarch64/assembler_aarch64.cpp:
@@ -1543,7 +1543,7 @@ void Address::lea(MacroAssembler *as, Register r) const {
   }
 }
 
-void Assembler::adrp(Register reg1, const Address &dest, uintptr_t &byte_offset) {
+void Assembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
   ShouldNotReachHere();
 }
 
@@ -1714,7 +1714,7 @@ void Assembler::add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int o
 
 bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) {
   bool shift = false;
-  uint64_t uimm = (uint64_t)uabs(imm);
+  uint64_t uimm = (uint64_t)uabs((jlong)imm);
   if (uimm < (1 << 12))
     return true;
   if (uimm < (1 << 24)
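Aside (not part of the diff): the rule this function relies on is that AArch64 ADD/SUB take a 12-bit unsigned immediate, optionally shifted left by 12. A minimal C++ sketch of that validity check (illustrative only; it ignores the INT64_MIN edge case that uabs() has to handle):

  #include <cstdint>

  // An immediate is encodable if it fits in 12 bits, or fits in 24 bits
  // with the low 12 bits zero (so it can use the LSL #12 form).
  bool valid_add_sub_imm(int64_t imm) {
    uint64_t u = (uint64_t)(imm < 0 ? -imm : imm);       // magnitude, like uabs()
    if (u < (1u << 12)) return true;                     // plain 12-bit form
    if (u < (1u << 24) && (u & 0xfff) == 0) return true; // shifted form
    return false;
  }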
src/hotspot/cpu/aarch64/assembler_aarch64.hpp:
@@ -380,9 +380,15 @@ class Address {
     : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
   Address(Register r, int o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, int64_t o)
+  Address(Register r, long o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, uint64_t o)
+  Address(Register r, long long o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
+  Address(Register r, unsigned int o)
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
+  Address(Register r, unsigned long o)
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
+  Address(Register r, unsigned long long o)
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
 #ifdef ASSERT
   Address(Register r, ByteSize disp)
src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp:
@@ -46,7 +46,7 @@ define_pd_global(intx, CompileThreshold, 10000);
 
 define_pd_global(intx, OnStackReplacePercentage, 140);
 define_pd_global(intx, ConditionalMoveLimit,     3);
-define_pd_global(intx, FLOATPRESSURE,            64);
+define_pd_global(intx, FLOATPRESSURE,            32);
 define_pd_global(intx, FreqInlineSize,           325);
 define_pd_global(intx, MinJumpTableSize,         10);
 define_pd_global(intx, INTPRESSURE,              24);
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp:
@@ -389,7 +389,7 @@ void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
   assert(CodeCache::find_blob(entry.target()) != NULL,
          "destination of far call not found in code cache");
   if (far_branches()) {
-    uintptr_t offset;
+    uint64_t offset;
     // We can use ADRP here because we know that the total size of
     // the code cache cannot exceed 2Gb.
     adrp(tmp, entry, offset);

@@ -407,7 +407,7 @@ void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
   assert(CodeCache::find_blob(entry.target()) != NULL,
          "destination of far call not found in code cache");
   if (far_branches()) {
-    uintptr_t offset;
+    uint64_t offset;
     // We can use ADRP here because we know that the total size of
     // the code cache cannot exceed 2Gb.
     adrp(tmp, entry, offset);
@@ -1499,7 +1499,7 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
 #ifndef PRODUCT
   {
     char buffer[64];
-    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
+    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
     block_comment(buffer);
   }
 #endif
@@ -2579,43 +2579,43 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
 #endif
   if (os::message_box(msg, "Execution stopped, print registers?")) {
     ttyLocker ttyl;
-    tty->print_cr(" pc = 0x%016lx", pc);
+    tty->print_cr(" pc = 0x%016" PRIx64, pc);
 #ifndef PRODUCT
     tty->cr();
     findpc(pc);
     tty->cr();
 #endif
-    tty->print_cr(" r0 = 0x%016lx", regs[0]);
-    tty->print_cr(" r1 = 0x%016lx", regs[1]);
-    tty->print_cr(" r2 = 0x%016lx", regs[2]);
-    tty->print_cr(" r3 = 0x%016lx", regs[3]);
-    tty->print_cr(" r4 = 0x%016lx", regs[4]);
-    tty->print_cr(" r5 = 0x%016lx", regs[5]);
-    tty->print_cr(" r6 = 0x%016lx", regs[6]);
-    tty->print_cr(" r7 = 0x%016lx", regs[7]);
-    tty->print_cr(" r8 = 0x%016lx", regs[8]);
-    tty->print_cr(" r9 = 0x%016lx", regs[9]);
-    tty->print_cr("r10 = 0x%016lx", regs[10]);
-    tty->print_cr("r11 = 0x%016lx", regs[11]);
-    tty->print_cr("r12 = 0x%016lx", regs[12]);
-    tty->print_cr("r13 = 0x%016lx", regs[13]);
-    tty->print_cr("r14 = 0x%016lx", regs[14]);
-    tty->print_cr("r15 = 0x%016lx", regs[15]);
-    tty->print_cr("r16 = 0x%016lx", regs[16]);
-    tty->print_cr("r17 = 0x%016lx", regs[17]);
-    tty->print_cr("r18 = 0x%016lx", regs[18]);
-    tty->print_cr("r19 = 0x%016lx", regs[19]);
-    tty->print_cr("r20 = 0x%016lx", regs[20]);
-    tty->print_cr("r21 = 0x%016lx", regs[21]);
-    tty->print_cr("r22 = 0x%016lx", regs[22]);
-    tty->print_cr("r23 = 0x%016lx", regs[23]);
-    tty->print_cr("r24 = 0x%016lx", regs[24]);
-    tty->print_cr("r25 = 0x%016lx", regs[25]);
-    tty->print_cr("r26 = 0x%016lx", regs[26]);
-    tty->print_cr("r27 = 0x%016lx", regs[27]);
-    tty->print_cr("r28 = 0x%016lx", regs[28]);
-    tty->print_cr("r30 = 0x%016lx", regs[30]);
-    tty->print_cr("r31 = 0x%016lx", regs[31]);
+    tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
+    tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
+    tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
+    tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
+    tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
+    tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
+    tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
+    tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
+    tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
+    tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
+    tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
+    tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
+    tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
+    tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
+    tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
+    tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
+    tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
+    tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
+    tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
+    tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
+    tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
+    tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
+    tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
+    tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
+    tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
+    tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
+    tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
+    tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
+    tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
+    tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
+    tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
     BREAKPOINT;
   }
 }
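Aside (not part of the diff): the point of replacing "%016lx" with PRIx64 is portability. "%lx" expects an unsigned long, which is only 32 bits on LLP64 Windows even in a 64-bit build, so printing an int64_t with it is wrong there; the <cinttypes> macro expands to the right length modifier per platform. A minimal sketch, not JDK code:

  #include <cinttypes>
  #include <cstdio>

  void print_reg(int64_t v) {
    // PRIx64 expands to "lx" on LP64 Linux and "llx" on LLP64 Windows,
    // so the format always matches a 64-bit argument.
    printf("0x%016" PRIx64 "\n", (uint64_t)v);
  }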
src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp:
@@ -489,31 +489,20 @@ public:
   // now mov instructions for loading absolute addresses and 32 or
   // 64 bit integers
 
-  inline void mov(Register dst, address addr)
-  {
-    mov_immediate64(dst, (uint64_t)addr);
-  }
+  inline void mov(Register dst, address addr)             { mov_immediate64(dst, (uint64_t)addr); }
 
-  inline void mov(Register dst, uint64_t imm64)
-  {
-    mov_immediate64(dst, imm64);
-  }
+  inline void mov(Register dst, int imm64)                { mov_immediate64(dst, (uint64_t)imm64); }
+  inline void mov(Register dst, long imm64)               { mov_immediate64(dst, (uint64_t)imm64); }
+  inline void mov(Register dst, long long imm64)          { mov_immediate64(dst, (uint64_t)imm64); }
+  inline void mov(Register dst, unsigned int imm64)       { mov_immediate64(dst, (uint64_t)imm64); }
+  inline void mov(Register dst, unsigned long imm64)      { mov_immediate64(dst, (uint64_t)imm64); }
+  inline void mov(Register dst, unsigned long long imm64) { mov_immediate64(dst, (uint64_t)imm64); }
 
   inline void movw(Register dst, uint32_t imm32)
   {
     mov_immediate32(dst, imm32);
   }
 
-  inline void mov(Register dst, int64_t l)
-  {
-    mov(dst, (uint64_t)l);
-  }
-
-  inline void mov(Register dst, int i)
-  {
-    mov(dst, (int64_t)i);
-  }
-
   void mov(Register dst, RegisterOrConstant src) {
     if (src.is_register())
       mov(dst, src.as_register());
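Aside (not part of the diff): spelling out every fundamental integer type here, instead of the fixed-width int64_t/uint64_t aliases, sidesteps overload-resolution trouble: int64_t is long on LP64 Linux but long long on Windows, so a caller whose argument type matches neither alias exactly can hit an ambiguity. A hedged sketch of the failure mode the enumeration avoids:

  #include <cstdint>

  struct Asm {
    void mov(uint64_t) {}
    void mov(int64_t)  {}
  };

  void demo(Asm& a) {
    a.mov(0);   // error: 'int' converts equally well to int64_t and to
                // uint64_t, so the call is ambiguous. With one overload
                // per fundamental type, mov(int) matches exactly.
  }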
src/hotspot/cpu/aarch64/vm_version_aarch64.cpp:
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
src/hotspot/cpu/arm (file name collapsed in the capture):
@@ -26,6 +26,7 @@
 #include "jvm.h"
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
src/hotspot/cpu/ppc (Assembler header):
@@ -436,6 +436,10 @@ class Assembler : public AbstractAssembler {
     NAND_OPCODE   = (31u << OPCODE_SHIFT | 476u << XO_21_30_SHIFT), // X-FORM
     NOR_OPCODE    = (31u << OPCODE_SHIFT | 124u << XO_21_30_SHIFT), // X-FORM
 
+    // Byte reverse opcodes (introduced with Power10)
+    BRH_OPCODE    = (31u << OPCODE_SHIFT | 219u << 1),              // X-FORM
+    BRW_OPCODE    = (31u << OPCODE_SHIFT | 155u << 1),              // X-FORM
+    BRD_OPCODE    = (31u << OPCODE_SHIFT | 187u << 1),              // X-FORM
 
     // opcodes only used for floating arithmetic
     FADD_OPCODE   = (63u << OPCODE_SHIFT | 21u << 1),

@@ -1568,6 +1572,11 @@ class Assembler : public AbstractAssembler {
   // testbit with condition register
   inline void testbitdi(ConditionRegister cr, Register a, Register s, int ui6);
 
+  // Byte reverse instructions (introduced with Power10)
+  inline void brh(    Register a, Register s);
+  inline void brw(    Register a, Register s);
+  inline void brd(    Register a, Register s);
+
   // rotate instructions
   inline void rotldi( Register a, Register s, int n);
   inline void rotrdi( Register a, Register s, int n);
src/hotspot/cpu/ppc (Assembler inline definitions):
@@ -287,6 +287,11 @@ inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, i
   }
 }
 
+// Byte reverse instructions (introduced with Power10)
+inline void Assembler::brh(Register a, Register s) { emit_int32(BRH_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::brw(Register a, Register s) { emit_int32(BRW_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::brd(Register a, Register s) { emit_int32(BRD_OPCODE | rta(a) | rs(s)); }
+
 // rotate instructions
 inline void Assembler::rotldi( Register a, Register s, int n) { Assembler::rldicl(a, s, n, 0); }
 inline void Assembler::rotrdi( Register a, Register s, int n) { Assembler::rldicl(a, s, 64-n, 0); }
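Aside (not part of the diff): what the new Power10 instructions compute, in portable terms. brh reverses the bytes within each halfword, brw within each word, brd within the doubleword; the hardware does this in a single instruction. A C++ sketch of the word case:

  #include <cstdint>

  // Equivalent of BRW for one 32-bit value: swap the byte order.
  uint32_t byte_reverse_word(uint32_t x) {
    return ((x & 0x000000ffu) << 24) |
           ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) |
           ((x & 0xff000000u) >> 24);
  }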
src/hotspot/cpu/ppc (platform globals):
@@ -84,8 +84,9 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
                     constraint)                                              \
                                                                              \
   product(uintx, PowerArchitecturePPC64, 0,                                  \
-          "CPU Version: x for PowerX. Currently recognizes Power5 to "       \
-          "Power8. Default is 0. Newer CPUs will be recognized as Power8.")  \
+          "Specify the PowerPC family version in use. If not provided, "     \
+          "HotSpot will determine it automatically. Host family version "    \
+          "is the maximum value allowed (instructions are not emulated).")   \
                                                                              \
   product(bool, SuperwordUseVSX, false,                                      \
           "Use Power8 VSX instructions for superword optimization.")         \

@@ -112,6 +113,9 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
           "Use load instructions for stack banging.")                        \
                                                                              \
   /* special instructions */                                                 \
+  product(bool, UseByteReverseInstructions, false,                           \
+          "Use byte reverse instructions.")                                  \
+                                                                             \
   product(bool, UseVectorByteReverseInstructionsPPC64, false,                \
           "Use Power9 xxbr* vector byte reverse instructions.")              \
                                                                              \
src/hotspot/cpu/ppc (C2 matcher .ad file):
@@ -13718,6 +13718,7 @@ instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
 // Just slightly faster than java implementation.
 instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
   match(Set dst (ReverseBytesI src));
+  predicate(!UseByteReverseInstructions);
   ins_cost(7*DEFAULT_COST);
 
   expand %{

@@ -13758,8 +13759,23 @@ instruct bytes_reverse_int_vec(iRegIdst dst, iRegIsrc src, vecX tmpV) %{
   ins_pipe(pipe_class_default);
 %}
 
+instruct bytes_reverse_int(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (ReverseBytesI src));
+  predicate(UseByteReverseInstructions);
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "BRW     $dst, $src" %}
+
+  ins_encode %{
+    __ brw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
   match(Set dst (ReverseBytesL src));
+  predicate(!UseByteReverseInstructions);
   ins_cost(15*DEFAULT_COST);
 
   expand %{

@@ -13815,8 +13831,23 @@ instruct bytes_reverse_long_vec(iRegLdst dst, iRegLsrc src, vecX tmpV) %{
   ins_pipe(pipe_class_default);
 %}
 
+instruct bytes_reverse_long(iRegLdst dst, iRegLsrc src) %{
+  match(Set dst (ReverseBytesL src));
+  predicate(UseByteReverseInstructions);
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "BRD     $dst, $src" %}
+
+  ins_encode %{
+    __ brd($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
   match(Set dst (ReverseBytesUS src));
+  predicate(!UseByteReverseInstructions);
   ins_cost(2*DEFAULT_COST);
 
   expand %{

@@ -13828,8 +13859,23 @@ instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
   %}
 %}
 
+instruct bytes_reverse_ushort(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (ReverseBytesUS src));
+  predicate(UseByteReverseInstructions);
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "BRH     $dst, $src" %}
+
+  ins_encode %{
+    __ brh($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
   match(Set dst (ReverseBytesS src));
+  predicate(!UseByteReverseInstructions);
   ins_cost(3*DEFAULT_COST);
 
   expand %{

@@ -13843,6 +13889,22 @@ instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
   %}
 %}
 
+instruct bytes_reverse_short(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (ReverseBytesS src));
+  predicate(UseByteReverseInstructions);
+  ins_cost(DEFAULT_COST);
+  size(8);
+
+  format %{ "BRH     $dst, $src\n\t"
+            "EXTSH   $dst, $dst" %}
+
+  ins_encode %{
+    __ brh($dst$$Register, $src$$Register);
+    __ extsh($dst$$Register, $dst$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Load Integer reversed byte order
 instruct loadI_reversed(iRegIdst dst, indirect mem) %{
   match(Set dst (ReverseBytesI (LoadI mem)));
src/hotspot/cpu/ppc (VM_Version .cpp):
@@ -67,7 +67,9 @@ void VM_Version::initialize() {
 
   // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
   if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
-    if (VM_Version::has_darn()) {
+    if (VM_Version::has_brw()) {
+      FLAG_SET_ERGO(PowerArchitecturePPC64, 10);
+    } else if (VM_Version::has_darn()) {
       FLAG_SET_ERGO(PowerArchitecturePPC64, 9);
     } else if (VM_Version::has_lqarx()) {
       FLAG_SET_ERGO(PowerArchitecturePPC64, 8);
@@ -84,12 +86,13 @@ void VM_Version::initialize() {
 
   bool PowerArchitecturePPC64_ok = false;
   switch (PowerArchitecturePPC64) {
-    case 9: if (!VM_Version::has_darn()   ) break;
-    case 8: if (!VM_Version::has_lqarx()  ) break;
-    case 7: if (!VM_Version::has_popcntw()) break;
-    case 6: if (!VM_Version::has_cmpb()   ) break;
-    case 5: if (!VM_Version::has_popcntb()) break;
-    case 0: PowerArchitecturePPC64_ok = true; break;
+    case 10: if (!VM_Version::has_brw()    ) break;
+    case  9: if (!VM_Version::has_darn()   ) break;
+    case  8: if (!VM_Version::has_lqarx()  ) break;
+    case  7: if (!VM_Version::has_popcntw()) break;
+    case  6: if (!VM_Version::has_cmpb()   ) break;
+    case  5: if (!VM_Version::has_popcntb()) break;
+    case  0: PowerArchitecturePPC64_ok = true; break;
     default: break;
   }
   guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
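Aside (not part of the diff): this switch is deliberately fall-through. A requested Power level is valid only if every feature from that level downward is present, so each case checks one feature and, on success, falls into the case below it. A condensed C++ sketch of the pattern (only two levels shown):

  // A level passes only if its own feature check and every lower
  // level's check succeed; any failed check breaks out with ok=false.
  bool level_supported(int level, bool has_brw, bool has_darn) {
    bool ok = false;
    switch (level) {
      case 10: if (!has_brw)  break;   // fall through on success
      case  9: if (!has_darn) break;   // fall through on success
      // ... lower levels elided ...
      case  0: ok = true; break;
      default: break;
    }
    return ok;
  }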
@@ -156,12 +159,23 @@ void VM_Version::initialize() {
       FLAG_SET_DEFAULT(UseVectorByteReverseInstructionsPPC64, false);
     }
   }
+  if (PowerArchitecturePPC64 >= 10) {
+    if (FLAG_IS_DEFAULT(UseByteReverseInstructions)) {
+      FLAG_SET_ERGO(UseByteReverseInstructions, true);
+    }
+  } else {
+    if (UseByteReverseInstructions) {
+      warning("UseByteReverseInstructions specified, but needs at least Power10.");
+      FLAG_SET_DEFAULT(UseByteReverseInstructions, false);
+    }
+  }
 #endif
 
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_fsqrt()   ? " fsqrt"   : ""),
                (has_isel()    ? " isel"    : ""),
                (has_lxarxeh() ? " lxarxeh" : ""),

@@ -179,7 +193,8 @@ void VM_Version::initialize() {
                (has_stdbrx()  ? " stdbrx"  : ""),
                (has_vshasig() ? " sha"     : ""),
                (has_tm()      ? " rtm"     : ""),
-               (has_darn()    ? " darn"    : "")
+               (has_darn()    ? " darn"    : ""),
+               (has_brw()     ? " brw"     : "")
                // Make sure number of %s matches num_features!
                );
   _features_string = os::strdup(buf);

@@ -835,6 +850,7 @@ void VM_Version::determine_features() {
   a->vshasigmaw(VR0, VR1, 1, 0xF);             // code[16] -> vshasig
   // rtm is determined by OS
   a->darn(R7);                                 // code[17] -> darn
+  a->brw(R5, R6);                              // code[18] -> brw
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.

@@ -888,6 +904,7 @@ void VM_Version::determine_features() {
   if (code[feature_cntr++]) features |= vshasig_m;
   // feature rtm_m is determined by OS
   if (code[feature_cntr++]) features |= darn_m;
+  if (code[feature_cntr++]) features |= brw_m;
 
   // Print the detection code.
   if (PrintAssembly) {
src/hotspot/cpu/ppc (VM_Version header):
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@ protected:
     vshasig,
     rtm,
     darn,
+    brw,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {

@@ -74,6 +75,7 @@ protected:
     vshasig_m             = (1 << vshasig),
     rtm_m                 = (1 << rtm    ),
     darn_m                = (1 << darn   ),
+    brw_m                 = (1 << brw    ),
     all_features_m        = (unsigned long)-1
   };
 
@@ -119,6 +121,7 @@ public:
   static bool has_vshasig() { return (_features & vshasig_m) != 0; }
   static bool has_tm()      { return (_features & rtm_m) != 0; }
   static bool has_darn()    { return (_features & darn_m) != 0; }
+  static bool has_brw()     { return (_features & brw_m) != 0; }
 
   static bool has_mtfprd()  { return has_vpmsumb(); } // alias for P8
 
src/hotspot/cpu/x86 (C1 LinearScan platform header):
@@ -78,7 +78,7 @@ inline void LinearScan::pd_add_temps(LIR_Op* op) {
         // assume that slow paths are uncommon but it's not clear that
         // would be a good idea.
         if (UseSSE > 0) {
-#ifndef PRODUCT
+#ifdef ASSERT
           if (TraceLinearScanLevel >= 2) {
             tty->print_cr("killing XMMs for trig");
           }
src/hotspot/cpu/x86 (VM_Version .cpp):
@@ -50,12 +50,14 @@ address VM_Version::_cpuinfo_segv_addr = 0;
 address VM_Version::_cpuinfo_cont_addr = 0;
 
 static BufferBlob* stub_blob;
-static const int stub_size = 1100;
+static const int stub_size = 2000;
 
 extern "C" {
   typedef void (*get_cpu_info_stub_t)(void*);
+  typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
 }
 static get_cpu_info_stub_t get_cpu_info_stub = NULL;
+static detect_virt_stub_t detect_virt_stub = NULL;
 
 
 class VM_Version_StubGenerator: public StubCodeGenerator {
@@ -568,6 +570,43 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ vzeroupper_uncached();
 #   undef __
   }
+  address generate_detect_virt() {
+    StubCodeMark mark(this, "VM_Version", "detect_virt_stub");
+#   define __ _masm->
+
+    address start = __ pc();
+
+    // Evacuate callee-saved registers
+    __ push(rbp);
+    __ push(rbx);
+    __ push(rsi); // for Windows
+
+#ifdef _LP64
+    __ mov(rax, c_rarg0); // CPUID leaf
+    __ mov(rsi, c_rarg1); // register array address (eax, ebx, ecx, edx)
+#else
+    __ movptr(rax, Address(rsp, 16)); // CPUID leaf
+    __ movptr(rsi, Address(rsp, 20)); // register array address
+#endif
+
+    __ cpuid();
+
+    // Store result to register array
+    __ movl(Address(rsi,  0), rax);
+    __ movl(Address(rsi,  4), rbx);
+    __ movl(Address(rsi,  8), rcx);
+    __ movl(Address(rsi, 12), rdx);
+
+    // Epilogue
+    __ pop(rsi);
+    __ pop(rbx);
+    __ pop(rbp);
+    __ ret(0);
+
+#   undef __
+
+    return start;
+  };
 };
 
 void VM_Version::get_processor_features() {
@@ -1671,56 +1710,12 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
     st->print_cr("VMWare virtualization detected");
     VirtualizationSupport::print_virtualization_info(st);
   } else if (vrt == HyperV) {
-    st->print_cr("HyperV virtualization detected");
+    st->print_cr("Hyper-V virtualization detected");
+  } else if (vrt == HyperVRole) {
+    st->print_cr("Hyper-V role detected");
   }
 }
 
-void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
-// TODO support 32 bit
-#if defined(_LP64)
-#if defined(_MSC_VER)
-  // Allocate space for the code
-  const int code_size = 100;
-  ResourceMark rm;
-  CodeBuffer cb("detect_virt", code_size, 0);
-  MacroAssembler* a = new MacroAssembler(&cb);
-  address code = a->pc();
-  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;
-
-  a->movq(r9, rbx); // save nonvolatile register
-
-  // next line would not work on 32-bit
-  a->movq(rax, c_rarg0 /* rcx */);
-  a->movq(r8, c_rarg1 /* rdx */);
-  a->cpuid();
-  a->movl(Address(r8,  0), rax);
-  a->movl(Address(r8,  4), rbx);
-  a->movl(Address(r8,  8), rcx);
-  a->movl(Address(r8, 12), rdx);
-
-  a->movq(rbx, r9); // restore nonvolatile register
-  a->ret(0);
-
-  uint32_t *code_end = (uint32_t *)a->pc();
-  a->flush();
-
-  // execute code
-  (*test)(idx, regs);
-#elif defined(__GNUC__)
-  __asm__ volatile (
-     "        cpuid;"
-     "        mov %%eax,(%1);"
-     "        mov %%ebx,4(%1);"
-     "        mov %%ecx,8(%1);"
-     "        mov %%edx,12(%1);"
-   : "+a" (idx)
-   : "S" (regs)
-   : "ebx", "ecx", "edx", "memory" );
-#endif
-#endif
-}
-
 
 bool VM_Version::use_biased_locking() {
 #if INCLUDE_RTM_OPT
   // RTM locking is most useful when there is high lock contention and
@@ -1821,59 +1816,62 @@ bool VM_Version::compute_has_intel_jcc_erratum() {
 // https://kb.vmware.com/s/article/1009458
 //
 void VM_Version::check_virtualizations() {
-#if defined(_LP64)
-  uint32_t registers[4];
-  char signature[13];
-  uint32_t base;
-  signature[12] = '\0';
-  memset((void*)registers, 0, 4*sizeof(uint32_t));
+  uint32_t registers[4] = {0};
+  char signature[13] = {0};
 
-  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
-    check_virt_cpuid(base, registers);
-
-    *(uint32_t *)(signature + 0) = registers[1];
-    *(uint32_t *)(signature + 4) = registers[2];
-    *(uint32_t *)(signature + 8) = registers[3];
+  // Xen cpuid leaves can be found 0x100 aligned boundary starting
+  // from 0x40000000 until 0x40010000.
+  // https://lists.linuxfoundation.org/pipermail/virtualization/2012-May/019974.html
+  for (int leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) {
+    detect_virt_stub(leaf, registers);
+    memcpy(signature, &registers[1], 12);
 
     if (strncmp("VMwareVMware", signature, 12) == 0) {
       Abstract_VM_Version::_detected_virtualization = VMWare;
       // check for extended metrics from guestlib
       VirtualizationSupport::initialize();
-    }
-
-    if (strncmp("Microsoft Hv", signature, 12) == 0) {
+    } else if (strncmp("Microsoft Hv", signature, 12) == 0) {
       Abstract_VM_Version::_detected_virtualization = HyperV;
-    }
-
-    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
+#ifdef _WINDOWS
+      // CPUID leaf 0x40000007 is available to the root partition only.
+      // See Hypervisor Top Level Functional Specification section 2.4.8 for more details.
+      // https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v6.0b.pdf
+      detect_virt_stub(0x40000007, registers);
+      if ((registers[0] != 0x0) ||
+          (registers[1] != 0x0) ||
+          (registers[2] != 0x0) ||
+          (registers[3] != 0x0)) {
+        Abstract_VM_Version::_detected_virtualization = HyperVRole;
+      }
+#endif
+    } else if (strncmp("KVMKVMKVM", signature, 9) == 0) {
       Abstract_VM_Version::_detected_virtualization = KVM;
-    }
-
-    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
+    } else if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
       Abstract_VM_Version::_detected_virtualization = XenHVM;
     }
   }
-#endif
 }
 
 void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
-  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
+  stub_blob = BufferBlob::create("VM_Version stub", stub_size);
   if (stub_blob == NULL) {
-    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
+    vm_exit_during_initialization("Unable to allocate stub for VM_Version");
   }
   CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
 
   get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                      g.generate_get_cpu_info());
+  detect_virt_stub = CAST_TO_FN_PTR(detect_virt_stub_t,
+                                    g.generate_detect_virt());
 
   get_processor_features();
 
   LP64_ONLY(Assembler::precompute_instructions();)
+
+  if (cpu_family() > 4) { // it supports CPUID
+    if (VM_Version::supports_hv()) { // Supports hypervisor
+      check_virtualizations();
+    }
+  }
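Aside (not part of the diff): how the 12-byte signature above is assembled. CPUID leaves in the 0x40000000 range return the hypervisor vendor string packed into EBX, ECX, EDX (4 bytes each), so copying registers[1..3] into a char buffer yields strings like "KVMKVMKVM". A self-contained C++ sketch of the decoding step, where 'query' is a hypothetical stand-in for the generated detect_virt stub:

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  // 'query' runs CPUID for 'leaf' and fills regs[0..3] with EAX..EDX.
  void decode_signature(void (*query)(uint32_t leaf, uint32_t* regs)) {
    uint32_t regs[4] = {0};
    char signature[13] = {0};        // 12 signature bytes + NUL
    query(0x40000000, regs);
    memcpy(signature, &regs[1], 12); // EBX, ECX, EDX back-to-back
    printf("hypervisor: %s\n", signature);
  }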
src/hotspot/cpu/x86 (VM_Version header):
@@ -89,7 +89,8 @@ class VM_Version : public Abstract_VM_Version {
                         : 1,
                osxsave  : 1,
                avx      : 1,
-                        : 3;
+                        : 2,
+               hv       : 1;
       } bits;
     };
 
@@ -348,6 +349,7 @@ protected:
 #define CPU_CLWB ((uint64_t)UCONST64( 0x80000000000)) // clwb instruction
 #define CPU_AVX512_VBMI2 ((uint64_t)UCONST64(0x100000000000)) // VBMI2 shift left double instructions
 #define CPU_AVX512_VBMI ((uint64_t)UCONST64(0x200000000000)) // Vector BMI instructions
+#define CPU_HV_PRESENT ((uint64_t)UCONST64(0x400000000000)) // for hypervisor detection
 
 // NB! When adding new CPU feature detection consider updating vmStructs_x86.hpp, vmStructs_jvmci.hpp, and VM_Version::get_processor_features().
 
@@ -580,6 +582,8 @@ enum Extended_Family {
         result |= CPU_AVX512_VBMI2;
       }
     }
+    if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
+      result |= CPU_HV_PRESENT;
     if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
       result |= CPU_BMI1;
     if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
@@ -871,6 +875,7 @@ public:
   static bool supports_avx512_vnni() { return (_features & CPU_AVX512_VNNI) != 0; }
   static bool supports_avx512_vbmi() { return (_features & CPU_AVX512_VBMI) != 0; }
   static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
+  static bool supports_hv() { return (_features & CPU_HV_PRESENT) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
@@ -1023,7 +1028,6 @@ public:
 
   // support functions for virtualization detection
  private:
-  static void check_virt_cpuid(uint32_t idx, uint32_t *regs);
   static void check_virtualizations();
 };
 
ZMountPoint (ZGC Linux support code, per the gc/z entry in the file list):
@@ -83,7 +83,7 @@ void ZMountPoint::get_mountpoints(const char* filesystem, ZArray<char*>* mountpo
   while (getline(&line, &length, fd) != -1) {
     char* const mountpoint = get_mountpoint(line, filesystem);
     if (mountpoint != NULL) {
-      mountpoints->add(mountpoint);
+      mountpoints->append(mountpoint);
     }
   }
 
@@ -129,10 +129,10 @@ char* ZMountPoint::find_mountpoint(const char* filesystem, const char** preferre
 
   get_mountpoints(filesystem, &mountpoints);
 
-  if (mountpoints.size() == 0) {
+  if (mountpoints.length() == 0) {
     // No mount point found
     log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem);
-  } else if (mountpoints.size() == 1) {
+  } else if (mountpoints.length() == 1) {
     // One mount point found
     path = strdup(mountpoints.at(0));
   } else {
src/hotspot/os (file name collapsed in the capture; the capture does not mark which include is new — runtime/atomic.hpp is the likeliest addition given the +1 line count):
@@ -31,6 +31,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "services/memTracker.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
 #include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 #include "utilities/events.hpp"
src/hotspot/os (Windows, os::print_os_info):
@@ -1609,9 +1609,7 @@ void os::print_os_info(outputStream* st) {
 
   os::win32::print_uptime_info(st);
 
-#ifdef _LP64
   VM_Version::print_platform_virtualization_info(st);
-#endif
 }
 
 void os::win32::print_windows_version(outputStream* st) {
src/hotspot/share/aot (AOT compiled-method metadata):
@@ -40,6 +40,7 @@ private:
   int _verified_entry;
   int _exception_handler_offset;
   int _deopt_handler_offset;
+  int _deopt_mh_handler_offset;
   int _stubs_offset;
   int _frame_size;
   // location in frame (offset for sp) that deopt can store the original

@@ -78,6 +79,7 @@ public:
   int verified_entry_offset() const { return _verified_entry; }
   int exception_handler_offset() const { return _exception_handler_offset; }
   int deopt_handler_offset() const { return _deopt_handler_offset; }
+  int deopt_mh_handler_offset() const { return _deopt_mh_handler_offset; }
   int orig_pc_offset() const { return _orig_pc_offset; }
 
   int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

@@ -148,7 +150,11 @@ private:
 
   _scopes_data_begin = (address) _meta->scopes_data_begin();
   _deopt_handler_begin = (address) _code + _meta->deopt_handler_offset();
-  _deopt_mh_handler_begin = (address) this;
+  if (_meta->deopt_mh_handler_offset() != -1) {
+    _deopt_mh_handler_begin = (address) _code + _meta->deopt_mh_handler_offset();
+  } else {
+    _deopt_mh_handler_begin = (address) this;
+  }
 
   _pc_desc_container.reset_to(scopes_pcs_begin());
 
src/hotspot/share/aot (next file; name collapsed in the capture):
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/method.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/timerTrace.hpp"
src/hotspot/share/c1 (CFGPrinter):
@@ -297,8 +297,6 @@ void CFGPrinterOutput::print_block(BlockBegin* block) {
   print_end("block");
 }
 
-
-
 void CFGPrinterOutput::print_cfg(BlockList* blocks, const char* name) {
   print_begin("cfg");
   print("name \"%s\"", name);

@@ -321,16 +319,13 @@ void CFGPrinterOutput::print_cfg(IR* blocks, const char* name) {
   output()->flush();
 }
 
-
-
-
 void CFGPrinterOutput::print_intervals(IntervalList* intervals, const char* name) {
   print_begin("intervals");
   print("name \"%s\"", name);
 
   for (int i = 0; i < intervals->length(); i++) {
     if (intervals->at(i) != NULL) {
-      intervals->at(i)->print(output());
+      intervals->at(i)->print_on(output());
     }
   }
 
@@ -338,5 +333,4 @@ void CFGPrinterOutput::print_intervals(IntervalList* intervals, const char* name
   output()->flush();
 }
 
-
-#endif
+#endif // NOT PRODUCT
src/hotspot/share/c1 (Compilation .cpp):
@@ -700,7 +700,6 @@ void Compilation::compile_only_this_method() {
   compile_only_this_scope(&stream, hir()->top_scope());
 }
 
-
 void Compilation::compile_only_this_scope(outputStream* st, IRScope* scope) {
   st->print("CompileOnly=");
   scope->method()->holder()->name()->print_symbol_on(st);

@@ -709,7 +708,6 @@ void Compilation::compile_only_this_scope(outputStream* st, IRScope* scope) {
   st->cr();
 }
 
-
 void Compilation::exclude_this_method() {
   fileStream stream(fopen(".hotspot_compiler", "at"));
   stream.print("exclude ");

@@ -719,4 +717,10 @@ void Compilation::exclude_this_method() {
   stream.cr();
   stream.cr();
 }
-#endif
+
+// Called from debugger to get the interval with 'reg_num' during register allocation.
+Interval* find_interval(int reg_num) {
+  return Compilation::current()->allocator()->find_interval_at(reg_num);
+}
+
+#endif // NOT PRODUCT
src/hotspot/share/c1 (Compilation .hpp):
@@ -109,7 +109,6 @@ class Compilation: public StackObj {
   ExceptionInfoList* exception_info_list() const { return _exception_info_list; }
   ExceptionHandlerTable* exception_handler_table() { return &_exception_handler_table; }
 
-  LinearScan* allocator() { return _allocator; }
   void set_allocator(LinearScan* allocator) { _allocator = allocator; }
 
   Instruction* _current_instruction; // the instruction currently being processed

@@ -150,6 +149,7 @@ class Compilation: public StackObj {
   Arena* arena() { return _arena; }
   bool has_access_indexed() { return _has_access_indexed; }
   bool should_install_code() { return _install_code && InstallMethods; }
+  LinearScan* allocator() { return _allocator; }
 
   // Instruction ids
   int get_next_id() { return _next_id++; }
src/hotspot/share/c1 (IR / ComputeLinearScanOrder):
@@ -440,7 +440,7 @@ class UseCountComputer: public ValueVisitor, BlockClosure {
 
 
 // helper macro for short definition of trace-output inside code
-#ifndef PRODUCT
+#ifdef ASSERT
   #define TRACE_LINEAR_SCAN(level, code) \
     if (TraceLinearScanLevel >= level) { \
       code;                              \

@@ -509,7 +509,7 @@ class ComputeLinearScanOrder : public StackObj {
   void compute_dominators();
 
   // debug functions
-  NOT_PRODUCT(void print_blocks();)
+  DEBUG_ONLY(void print_blocks();)
   DEBUG_ONLY(void verify();)
 
   Compilation* compilation() const { return _compilation; }

@@ -559,7 +559,7 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
   compute_order(start_block);
   compute_dominators();
 
-  NOT_PRODUCT(print_blocks());
+  DEBUG_ONLY(print_blocks());
   DEBUG_ONLY(verify());
 }
 
@@ -1047,7 +1047,7 @@ void ComputeLinearScanOrder::compute_dominators() {
 }
 
 
-#ifndef PRODUCT
+#ifdef ASSERT
 void ComputeLinearScanOrder::print_blocks() {
   if (TraceLinearScanLevel >= 2) {
     tty->print_cr("----- loop information:");

@@ -1104,9 +1104,7 @@ void ComputeLinearScanOrder::print_blocks() {
     }
   }
 }
-#endif
 
-#ifdef ASSERT
 void ComputeLinearScanOrder::verify() {
   assert(_linear_scan_order->length() == _num_blocks, "wrong number of blocks in list");
 
@@ -1182,7 +1180,7 @@ void ComputeLinearScanOrder::verify() {
     }
   }
 }
-#endif
+#endif // ASSERT
 
 
 void IR::compute_code() {
src/hotspot/share/c1 (LinearScan .cpp):
@@ -46,17 +46,19 @@
 // helper macro for short definition of timer
 #define TIME_LINEAR_SCAN(timer_name) TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);
 
+#else
+#define TIME_LINEAR_SCAN(timer_name)
+#endif
+
+#ifdef ASSERT
+
 // helper macro for short definition of trace-output inside code
 #define TRACE_LINEAR_SCAN(level, code) \
   if (TraceLinearScanLevel >= level) { \
     code;                              \
   }
 
 #else
-
-#define TIME_LINEAR_SCAN(timer_name)
 #define TRACE_LINEAR_SCAN(level, code)
 
 #endif
 
 // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
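Aside (not part of the diff): the hunk above is an instance of a general pattern this commit applies throughout c1: TraceLinearScanLevel is a debug-build flag, so macros that read it must guard on ASSERT rather than on "not PRODUCT", and compile to nothing otherwise. A minimal sketch of the pattern (macro name hypothetical):

  #ifdef ASSERT
    #define TRACE_AT(level, stmt)            \
      if (TraceLinearScanLevel >= (level)) { \
        stmt;                                \
      }
  #else
    #define TRACE_AT(level, stmt) /* compiled out in non-debug builds */
  #endif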
@@ -792,7 +794,7 @@ void LinearScan::compute_global_live_sets() {
         live_in.set_union(block->live_gen());
       }
 
-#ifndef PRODUCT
+#ifdef ASSERT
       if (TraceLinearScanLevel >= 4) {
         char c = ' ';
         if (iteration_count == 0 || change_occurred_in_block) {
@@ -3201,7 +3203,47 @@ void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
   }
 }
 
-#endif //PRODUCT
+void LinearScan::print_reg_num(outputStream* out, int reg_num) {
+  if (reg_num == -1) {
+    out->print("[ANY]");
+    return;
+  } else if (reg_num >= LIR_OprDesc::vreg_base) {
+    out->print("[VREG %d]", reg_num);
+    return;
+  }
+
+  LIR_Opr opr = LIR_OprFact::illegal();
+
+#ifdef X86
+  int last_xmm_reg = pd_last_xmm_reg;
+#ifdef _LP64
+  if (UseAVX < 3) {
+    last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
+  }
+#endif
+#endif
+  if (reg_num >= pd_first_cpu_reg && reg_num <= pd_last_cpu_reg) {
+    opr = LIR_OprFact::single_cpu(reg_num);
+  } else if (reg_num >= pd_first_fpu_reg && reg_num <= pd_last_fpu_reg) {
+    opr = LIR_OprFact::single_fpu(reg_num - pd_first_fpu_reg);
+#ifdef X86
+  } else if (reg_num >= pd_first_xmm_reg && reg_num <= last_xmm_reg) {
+    opr = LIR_OprFact::single_xmm(reg_num - pd_first_xmm_reg);
+#endif
+  } else {
+    assert(false, "unknown register");
+  }
+  opr->print(out);
+}
+
+Interval* LinearScan::find_interval_at(int reg_num) const {
+  if (reg_num < 0 || reg_num >= _intervals.length()) {
+    return NULL;
+  }
+  return interval_at(reg_num);
+}
+
+#endif // PRODUCT
 
 
 // ********** verification functions for allocation
@@ -4556,49 +4598,28 @@ bool Interval::intersects_any_children_of(Interval* interval) const {
 
 
 #ifndef PRODUCT
-void Interval::print(outputStream* out) const {
+void Interval::print_on(outputStream* out) const {
   const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
   const char* UseKind2Name[] = { "N", "L", "S", "M" };
 
   const char* type_name;
-  LIR_Opr opr = LIR_OprFact::illegal();
   if (reg_num() < LIR_OprDesc::vreg_base) {
     type_name = "fixed";
-    // need a temporary operand for fixed intervals because type() cannot be called
-#ifdef X86
-    int last_xmm_reg = pd_last_xmm_reg;
-#ifdef _LP64
-    if (UseAVX < 3) {
-      last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
-    }
-#endif
-#endif
-    if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
-      opr = LIR_OprFact::single_cpu(assigned_reg());
-    } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
-      opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
-#ifdef X86
-    } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= last_xmm_reg) {
-      opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
-#endif
-    } else {
-      ShouldNotReachHere();
-    }
   } else {
     type_name = type2name(type());
-    if (assigned_reg() != -1 &&
-        (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
-      opr = LinearScan::calc_operand_for_interval(this);
-    }
   }
 
   out->print("%d %s ", reg_num(), type_name);
-  if (opr->is_valid()) {
-    out->print("\"");
-    opr->print(out);
-    out->print("\" ");
+  if (reg_num() < LIR_OprDesc::vreg_base) {
+    LinearScan::print_reg_num(out, assigned_reg());
+  } else if (assigned_reg() != -1 && (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
+    LinearScan::calc_operand_for_interval(this)->print(out);
+  } else {
+    // Virtual register that has no assigned register yet.
+    out->print("[ANY]");
   }
-  out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
+
+  out->print(" %d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
 
   // print ranges
   Range* cur = _first;
@@ -4622,7 +4643,28 @@ void Interval::print(outputStream* out) const {
   out->print(" \"%s\"", SpillState2Name[spill_state()]);
   out->cr();
 }
-#endif
+
+void Interval::print_parent() const {
+  if (_split_parent != this) {
+    _split_parent->print_on(tty);
+  } else {
+    tty->print_cr("Parent: this");
+  }
+}
+
+void Interval::print_children() const {
+  if (_split_children == NULL) {
+    tty->print_cr("Children: []");
+  } else {
+    tty->print_cr("Children:");
+    for (int i = 0; i < _split_children->length(); i++) {
+      tty->print("%d: ", i);
+      _split_children->at(i)->print_on(tty);
+    }
+  }
+}
+#endif // NOT PRODUCT
 
 
 
 
@@ -4727,7 +4769,7 @@ void IntervalWalker::walk_to(IntervalState state, int from) {
       if (cur->current_at_end()) {
         // move to handled state (not maintained as a list)
         cur->set_state(handledState);
-        interval_moved(cur, kind, state, handledState);
+        DEBUG_ONLY(interval_moved(cur, kind, state, handledState);)
       } else if (cur->current_from() <= from){
         // sort into active list
         append_sorted(active_first_addr(kind), cur);

@@ -4736,7 +4778,7 @@ void IntervalWalker::walk_to(IntervalState state, int from) {
           assert(state == activeState, "check");
           prev = cur->next_addr();
         }
-        interval_moved(cur, kind, state, activeState);
+        DEBUG_ONLY(interval_moved(cur, kind, state, activeState);)
       } else {
         // sort into inactive list
        append_sorted(inactive_first_addr(kind), cur);

@@ -4745,7 +4787,7 @@ void IntervalWalker::walk_to(IntervalState state, int from) {
          assert(state == inactiveState, "check");
          prev = cur->next_addr();
        }
-        interval_moved(cur, kind, state, inactiveState);
+        DEBUG_ONLY(interval_moved(cur, kind, state, inactiveState);)
      }
    } else {
      prev = cur->next_addr();

@@ -4801,7 +4843,7 @@ void IntervalWalker::walk_to(int lir_op_id) {
       current()->set_state(activeState);
       if (activate_current()) {
         append_sorted(active_first_addr(current_kind()), current());
-        interval_moved(current(), current_kind(), unhandledState, activeState);
+        DEBUG_ONLY(interval_moved(current(), current_kind(), unhandledState, activeState);)
       }
 
       next_interval();
@@ -4811,8 +4853,8 @@
   }
 }
 
+#ifdef ASSERT
 void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
-#ifndef PRODUCT
   if (TraceLinearScanLevel >= 4) {
 #define print_state(state) \
     switch(state) {\

@@ -4829,10 +4871,8 @@ void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, Inter
 
 #undef print_state
   }
-#endif
 }
-
-
+#endif // ASSERT
 
 // **** Implementation of LinearScanWalker **************************
 
@@ -5305,7 +5345,6 @@ void LinearScanWalker::split_and_spill_interval(Interval* it) {
   }
 }
 
-
 int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
   int min_full_reg = any_reg;
   int max_partial_reg = any_reg;

@@ -5367,7 +5406,6 @@ int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to
   }
 }
 
-
 bool LinearScanWalker::alloc_free_reg(Interval* cur) {
   TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print());
 
@@ -5381,8 +5419,16 @@ bool LinearScanWalker::alloc_free_reg(Interval* cur) {
   // _use_pos contains the start of the next interval that has this register assigned
   // (either as a fixed register or a normal allocated register in the past)
   // only intervals overlapping with cur are processed, non-overlapping invervals can be ignored safely
-  TRACE_LINEAR_SCAN(4, tty->print_cr("      state of registers:"));
-  TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr("      reg %d: use_pos: %d", i, _use_pos[i]));
+#ifdef ASSERT
+  if (TraceLinearScanLevel >= 4) {
+    tty->print_cr("      state of registers:");
+    for (int i = _first_reg; i <= _last_reg; i++) {
+      tty->print("      reg %d (", i);
+      LinearScan::print_reg_num(i);
+      tty->print_cr("): use_pos: %d", _use_pos[i]);
+    }
+  }
+#endif
 
   int hint_reg, hint_regHi;
   Interval* register_hint = cur->register_hint();

@@ -5394,8 +5440,16 @@ bool LinearScanWalker::alloc_free_reg(Interval* cur) {
     assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals");
     hint_regHi = hint_reg + 1;  // connect e.g. eax-edx
   }
-  TRACE_LINEAR_SCAN(4, tty->print("      hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print());
-
+#ifdef ASSERT
+  if (TraceLinearScanLevel >= 4) {
+    tty->print("      hint registers %d (", hint_reg);
+    LinearScan::print_reg_num(hint_reg);
+    tty->print("), %d (", hint_regHi);
+    LinearScan::print_reg_num(hint_regHi);
+    tty->print(") from interval ");
+    register_hint->print();
+  }
+#endif
 } else {
   hint_reg = any_reg;
   hint_regHi = any_reg;
@@ -5450,8 +5504,15 @@ bool LinearScanWalker::alloc_free_reg(Interval* cur) {
   }
 
   cur->assign_reg(reg, regHi);
-  TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi));
-
+#ifdef ASSERT
+  if (TraceLinearScanLevel >= 2) {
+    tty->print("      selected registers %d (", reg);
+    LinearScan::print_reg_num(reg);
+    tty->print("), %d (", regHi);
+    LinearScan::print_reg_num(regHi);
+    tty->print_cr(")");
+  }
+#endif
   assert(split_pos > 0, "invalid split_pos");
   if (need_split) {
     // register not available for full interval, so split it
@@ -5539,11 +5600,13 @@ void LinearScanWalker::alloc_locked_reg(Interval* cur) {
   spill_collect_active_any();
   spill_collect_inactive_any(cur);
 
-#ifndef PRODUCT
+#ifdef ASSERT
   if (TraceLinearScanLevel >= 4) {
     tty->print_cr("      state of registers:");
     for (int i = _first_reg; i <= _last_reg; i++) {
-      tty->print("      reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
+      tty->print("      reg %d(", i);
+      LinearScan::print_reg_num(i);
+      tty->print("): use_pos: %d, block_pos: %d, intervals: ", _use_pos[i], _block_pos[i]);
       for (int j = 0; j < _spill_intervals[i]->length(); j++) {
         tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
       }
@@ -5613,7 +5676,15 @@ void LinearScanWalker::alloc_locked_reg(Interval* cur) {
 
     split_and_spill_interval(cur);
   } else {
-    TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
+#ifdef ASSERT
+    if (TraceLinearScanLevel >= 4) {
+      tty->print("decided to use register %d (", reg);
+      LinearScan::print_reg_num(reg);
+      tty->print("), %d (", regHi);
+      LinearScan::print_reg_num(regHi);
+      tty->print_cr(")");
+    }
+#endif
    assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
    assert(split_pos > 0, "invalid split_pos");
    assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
src/hotspot/share/c1 (LinearScan .hpp):
@@ -367,6 +367,8 @@ class LinearScan : public CompilationResourceObj {
   static void print_bitmap(BitMap& bitmap);
   void        print_intervals(const char* label);
   void        print_lir(int level, const char* label, bool hir_valid = true);
+  static void print_reg_num(int reg_num) { print_reg_num(tty, reg_num); }
+  static void print_reg_num(outputStream* out, int reg_num);
 #endif
 
 #ifdef ASSERT

@@ -390,10 +392,13 @@ class LinearScan : public CompilationResourceObj {
   int max_spills() const { return _max_spills; }
   int num_calls() const { assert(_num_calls >= 0, "not set"); return _num_calls; }
 
-  // entry functions for printing
 #ifndef PRODUCT
+  // entry functions for printing
   static void print_statistics();
   static void print_timers(double total);
+
+  // Used for debugging
+  Interval* find_interval_at(int reg_num) const;
 #endif
 };
 
@@ -626,7 +631,15 @@ class Interval : public CompilationResourceObj {
   int  current_intersects_at(Interval* it) { return _current->intersects_at(it->_current); };
 
   // printing
-  void print(outputStream* out = tty) const PRODUCT_RETURN;
+#ifndef PRODUCT
+  void print() const { print_on(tty); }
+  void print_on(outputStream* out) const;
+
+  // Used for debugging
+  void print_parent() const;
+  void print_children() const;
+#endif
 
 };
 

@@ -674,9 +687,9 @@ class IntervalWalker : public CompilationResourceObj {
   // It is safe to append current to any interval list but the unhandled list.
   virtual bool activate_current() { return true; }
 
-  // interval_moved() is called whenever an interval moves from one interval list to another.
-  // In the implementation of this method it is prohibited to move the interval to any list.
-  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);
+  // This method is called whenever an interval moves from one interval list to another to print some
+  // information about it and its state change if TraceLinearScanLevel is set appropriately.
+  DEBUG_ONLY(void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);)
 
  public:
   IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);
src/hotspot/share/classfile/classFileParser.cpp:
@@ -5474,11 +5474,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
   ClassLoadingService::notify_class_loaded(ik, false /* not shared class */);
 
   if (!is_internal()) {
-    if (log_is_enabled(Info, class, load)) {
-      ResourceMark rm;
-      const char* module_name = (module_entry->name() == NULL) ? UNNAMED_MODULE : module_entry->name()->as_C_string();
-      ik->print_class_load_logging(_loader_data, module_name, _stream);
-    }
+    ik->print_class_load_logging(_loader_data, module_entry, _stream);
 
     if (ik->minor_version() == JAVA_PREVIEW_MINOR_VERSION &&
         ik->major_version() == JVM_CLASSFILE_MAJOR_VERSION &&

@@ -5985,44 +5981,6 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
       }
       ls.cr();
     }
-
-#if INCLUDE_CDS
-    if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) {
-      if (!ClassLoader::has_jrt_entry()) {
-        warning("DumpLoadedClassList and CDS are not supported in exploded build");
-        DumpLoadedClassList = NULL;
-      } else if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
-                 !_is_hidden &&
-                 _unsafe_anonymous_host == NULL) {
-        // Only dump the classes that can be stored into CDS archive.
-        // Hidden and unsafe anonymous classes such as generated LambdaForm classes are also not included.
-        oop class_loader = _loader_data->class_loader();
-        ResourceMark rm(THREAD);
-        bool skip = false;
-        if (class_loader == NULL || SystemDictionary::is_platform_class_loader(class_loader)) {
-          // For the boot and platform class loaders, skip classes that are not found in the
-          // java runtime image, such as those found in the --patch-module entries.
-          // These classes can't be loaded from the archive during runtime.
-          if (!stream->from_boot_loader_modules_image() && strncmp(stream->source(), "jrt:", 4) != 0) {
-            skip = true;
-          }
-
-          if (class_loader == NULL && ClassLoader::contains_append_entry(stream->source())) {
-            // .. but don't skip the boot classes that are loaded from -Xbootclasspath/a
-            // as they can be loaded from the archive during runtime.
-            skip = false;
-          }
-        }
-        if (skip) {
-          tty->print_cr("skip writing class %s from source %s to classlist file",
-                        _class_name->as_C_string(), stream->source());
-        } else {
-          classlist_file->print_cr("%s", _class_name->as_C_string());
-          classlist_file->flush();
-        }
-      }
-    }
-#endif
   }
 
   // SUPERKLASS
@ -26,7 +26,6 @@
|
||||
#define SHARE_CLASSFILE_CLASSLOADER_HPP
|
||||
|
||||
#include "jimage.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/handles.hpp"
|
||||
#include "runtime/perfData.hpp"
|
||||
#include "utilities/exceptions.hpp"
|
||||
|
@ -27,6 +27,7 @@
|
||||
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
|
||||
// Next entry in class path
|
||||
inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); }
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "memory/heapShared.inline.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "utilities/numberSeq.hpp"
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oopHandle.inline.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/safepointVerifiers.hpp"
|
||||
#include "utilities/hashtable.inline.hpp"
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jvmtiEnvBase.hpp"
|
||||
#include "prims/jvmtiRedefineClasses.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_JFR
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,6 +28,7 @@
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -719,10 +719,7 @@ oop StringTable::lookup_shared(const jchar* name, int len, unsigned int hash) {
|
||||
|
||||
oop StringTable::create_archived_string(oop s, Thread* THREAD) {
|
||||
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
|
||||
|
||||
if (HeapShared::is_archived_object(s)) {
|
||||
return s;
|
||||
}
|
||||
assert(!HeapShared::is_archived_object(s), "sanity");
|
||||
|
||||
oop new_s = NULL;
|
||||
typeArrayOop v = java_lang_String::value_no_keepalive(s);
|
||||
@ -740,42 +737,34 @@ oop StringTable::create_archived_string(oop s, Thread* THREAD) {
|
||||
return new_s;
|
||||
}
|
||||
|
||||
struct CopyToArchive : StackObj {
|
||||
class CopyToArchive : StackObj {
|
||||
CompactHashtableWriter* _writer;
|
||||
public:
|
||||
CopyToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
|
||||
bool operator()(WeakHandle* val) {
|
||||
oop s = val->peek();
|
||||
if (s == NULL) {
|
||||
return true;
|
||||
}
|
||||
bool do_entry(oop s, bool value_ignored) {
|
||||
assert(s != NULL, "sanity");
|
||||
unsigned int hash = java_lang_String::hash_code(s);
|
||||
oop new_s = StringTable::create_archived_string(s, Thread::current());
|
||||
if (new_s == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
val->replace(new_s);
|
||||
// add to the compact table
|
||||
_writer->add(hash, CompressedOops::encode(new_s));
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void StringTable::copy_shared_string_table(CompactHashtableWriter* writer) {
|
||||
assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
|
||||
|
||||
CopyToArchive copy(writer);
|
||||
_local_table->do_safepoint_scan(copy);
|
||||
}
|
||||
|
||||
void StringTable::write_to_archive() {
|
||||
void StringTable::write_to_archive(const DumpedInternedStrings* dumped_interned_strings) {
|
||||
assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
|
||||
|
||||
_shared_table.reset();
|
||||
CompactHashtableWriter writer(_items_count, &MetaspaceShared::stats()->string);
|
||||
|
||||
// Copy the interned strings into the "string space" within the java heap
|
||||
copy_shared_string_table(&writer);
|
||||
CopyToArchive copier(&writer);
|
||||
dumped_interned_strings->iterate(&copier);
|
||||
|
||||
writer.dump(&_shared_table, "string");
|
||||
}
|
||||
|
||||
|
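The dump-time copy above no longer scans the live StringTable; a DumpedInternedStrings table drives the iteration and calls do_entry() on the copier, which feeds the compact-table writer. A minimal standalone sketch of that control flow (all types here are simplified stand-ins, not the actual HotSpot classes, and the hash/encode step is hypothetical):

#include <cstdio>
#include <vector>

struct Writer {                      // stand-in for CompactHashtableWriter
  void add(unsigned hash, int enc) { std::printf("add %u -> %d\n", hash, enc); }
};

struct CopyToArchive {               // mirrors the do_entry() shape
  Writer* _writer;
  explicit CopyToArchive(Writer* w) : _writer(w) {}
  bool do_entry(int s, bool /*value_ignored*/) {
    _writer->add(static_cast<unsigned>(s) * 31u, s);  // hypothetical hash/encode
    return true;                     // keep iterating
  }
};

struct DumpedStrings {               // stand-in for DumpedInternedStrings
  std::vector<int> _entries{1, 2, 3};
  template <typename Closure> void iterate(Closure* cl) const {
    for (int s : _entries) cl->do_entry(s, true);
  }
};

int main() {
  Writer w;
  CopyToArchive copier(&w);
  DumpedStrings{}.iterate(&copier);  // table drives iteration, copier fills writer
  return 0;
}
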
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "utilities/tableStatistics.hpp"

class CompactHashtableWriter;
class DumpedInternedStrings;
class JavaThread;
class SerializeClosure;

@@ -105,11 +106,10 @@ class StringTable : public CHeapObj<mtSymbol>{
// Sharing
private:
static oop lookup_shared(const jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static void copy_shared_string_table(CompactHashtableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN;
public:
static oop create_archived_string(oop s, Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN;
static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN;
static void write_to_archive(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

// Jcmd

@@ -28,6 +28,7 @@
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
@@ -56,7 +57,6 @@ const size_t ON_STACK_BUFFER_LENGTH = 128;

inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) {
if (value->equals(key, len)) {
assert(value->is_permanent(), "must be shared");
return true;
} else {
return false;
@@ -176,11 +176,6 @@ void SymbolTable::create_table () {
}

void SymbolTable::delete_symbol(Symbol* sym) {
if (Arguments::is_dumping_archive()) {
// Do not delete symbols as we may be in the middle of preparing the
// symbols for dumping.
return;
}
if (sym->is_permanent()) {
MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
// Deleting permanent symbol should not occur very often (insert race condition),
@@ -225,9 +220,9 @@ Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap) {
assert (len <= Symbol::max_length(), "should be checked by caller");

Symbol* sym;
if (Arguments::is_dumping_archive()) {
// Need to make all symbols permanent -- or else some symbols may be GC'ed
// during the archive dumping code that's executed outside of a safepoint.
if (DumpSharedSpaces) {
// TODO: Special handling of Symbol allocation for DumpSharedSpaces will be removed
// in JDK-8250989
c_heap = false;
}
if (c_heap) {
@@ -280,24 +275,6 @@ void SymbolTable::symbols_do(SymbolClosure *cl) {
_local_table->do_safepoint_scan(sd);
}

class MetaspacePointersDo : StackObj {
MetaspaceClosure *_it;
public:
MetaspacePointersDo(MetaspaceClosure *it) : _it(it) {}
bool operator()(Symbol** value) {
assert(value != NULL, "expected valid value");
assert(*value != NULL, "value should point to a symbol");
_it->push(value);
return true;
};
};

void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
Arguments::assert_is_dumping_archive();
MetaspacePointersDo mpd(it);
_local_table->do_safepoint_scan(mpd);
}

Symbol* SymbolTable::lookup_dynamic(const char* name,
int len, unsigned int hash) {
Symbol* sym = do_lookup(name, len, hash);
@@ -606,42 +583,37 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
}

#if INCLUDE_CDS
struct CopyToArchive : StackObj {
CompactHashtableWriter* _writer;
CopyToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
bool operator()(Symbol** value) {
assert(value != NULL, "expected valid value");
assert(*value != NULL, "value should point to a symbol");
Symbol* sym = *value;
void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
CompactHashtableWriter* writer) {
int len = symbols->length();
for (int i = 0; i < len; i++) {
Symbol* sym = ArchiveBuilder::get_relocated_symbol(symbols->at(i));
unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
"must not rehash during dumping");
sym->set_permanent();
if (DynamicDumpSharedSpaces) {
sym = DynamicArchive::original_to_target(sym);
sym = DynamicArchive::buffer_to_target(sym);
}
_writer->add(fixed_hash, MetaspaceShared::object_delta_u4(sym));
return true;
writer->add(fixed_hash, MetaspaceShared::object_delta_u4(sym));
}
};

void SymbolTable::copy_shared_symbol_table(CompactHashtableWriter* writer) {
CopyToArchive copy(writer);
_local_table->do_safepoint_scan(copy);
}

size_t SymbolTable::estimate_size_for_archive() {
return CompactHashtableWriter::estimate_size(int(_items_count));
}

void SymbolTable::write_to_archive(bool is_static_archive) {
void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
CompactHashtableWriter writer(int(_items_count),
&MetaspaceShared::stats()->symbol);
copy_shared_symbol_table(&writer);
if (is_static_archive) {
copy_shared_symbol_table(symbols, &writer);
if (!DynamicDumpSharedSpaces) {
_shared_table.reset();
writer.dump(&_shared_table, "symbol");

// Verify table is correct
// Verify the written shared table is correct -- at this point,
// vmSymbols has already been relocated to point to the archived
// version of the Symbols.
Symbol* sym = vmSymbols::java_lang_Object();
const char* name = (const char*)sym->bytes();
int len = sym->utf8_length();

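The symbol-table dump above moves from scanning the live hashtable to walking an explicit, caller-supplied list of symbols, so ordering and relocation stay under the archive builder's control, and the stable hash must match the non-rehashed table hash. A standalone sketch of that array-driven loop shape (the container, hash function, and "add to writer" step below are simplified stand-ins, not HotSpot code):

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical fixed hash, standing in for hash_shared_symbol():
// it must stay stable across dump and runtime, hence no rehashing.
static unsigned stable_hash(const std::string& s) {
  unsigned h = 0;
  for (char c : s) h = h * 31u + static_cast<unsigned char>(c);
  return h;
}

int main() {
  // Stand-in for the GrowableArray<Symbol*> collected by the archive builder.
  std::vector<std::string> symbols = {"java/lang/Object", "main", "()V"};
  for (const std::string& sym : symbols) {   // copy_shared_symbol_table loop shape
    std::printf("add %u %s\n", stable_hash(sym), sym.c_str());
  }
  return 0;
}
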
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "utilities/tableStatistics.hpp"

class JavaThread;
template <typename T> class GrowableArray;

// TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*).
@@ -207,13 +208,13 @@ public:

// Sharing
private:
static void copy_shared_symbol_table(CompactHashtableWriter* ch_table);
static void copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
CompactHashtableWriter* ch_table);
public:
static size_t estimate_size_for_archive() NOT_CDS_RETURN_(0);
static void write_to_archive(bool is_static_archive = true) NOT_CDS_RETURN;
static void write_to_archive(GrowableArray<Symbol*>* symbols) NOT_CDS_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc,
bool is_static_archive = true) NOT_CDS_RETURN;
static void metaspace_pointers_do(MetaspaceClosure* it);

// Jcmd
static void dump(outputStream* st, bool verbose=false);

@@ -1470,15 +1470,6 @@ void SystemDictionary::load_shared_class_misc(InstanceKlass* ik, ClassLoaderData
ik->set_classpath_index(path_index, THREAD);
}

if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", ik->name()->as_C_string());
classlist_file->flush();
}
}

// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(ik, true /* shared class */);

@@ -29,6 +29,7 @@
#include "classfile/verifier.hpp"
#include "logging/log.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"

VerificationType VerificationType::from_tag(u1 tag) {

@@ -45,6 +45,7 @@
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -330,7 +330,7 @@ public:
// Deopt
// Return true if the PC is one we would expect if the frame is being deopted.
inline bool is_deopt_pc(address pc);
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
inline bool is_deopt_mh_entry(address pc);
inline bool is_deopt_entry(address pc);

virtual bool can_convert_to_zombie() = 0;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,14 @@ inline bool CompiledMethod::is_deopt_entry(address pc) {
;
}

inline bool CompiledMethod::is_deopt_mh_entry(address pc) {
return pc == deopt_mh_handler_begin()
#if INCLUDE_JVMCI
|| (is_compiled_by_jvmci() && pc == (deopt_mh_handler_begin() + NativeCall::instruction_size))
#endif
;
}

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//

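The inlined check above now also accepts, for JVMCI-compiled code, a PC one call-instruction past the MH deopt handler. A small standalone model of that condition shape (the constant below is a hypothetical stand-in for NativeCall::instruction_size):

#include <cstdint>
#include <cstdio>

static const int kNativeCallSize = 4;   // hypothetical NativeCall::instruction_size

static bool is_deopt_mh_entry(uintptr_t pc, uintptr_t mh_begin, bool by_jvmci) {
  // Plain compilers: exact handler address; JVMCI: also one call-size past it.
  return pc == mh_begin ||
         (by_jvmci && pc == mh_begin + kNativeCallSize);
}

int main() {
  std::printf("%d\n", is_deopt_mh_entry(0x1004, 0x1000, true));   // prints 1
  std::printf("%d\n", is_deopt_mh_entry(0x1004, 0x1000, false));  // prints 0
  return 0;
}
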
@@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"

@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "compiler/compilerDefinitions.hpp"
@@ -402,17 +403,6 @@ bool CompilerConfig::check_args_consistency(bool status) {
}
FLAG_SET_CMDLINE(PostLoopMultiversioning, false);
}
if (UseCountedLoopSafepoints && LoopStripMiningIter == 0) {
if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
warning("When counted loop safepoints are enabled, LoopStripMiningIter must be at least 1 (a safepoint every 1 iteration): setting it to 1");
}
LoopStripMiningIter = 1;
} else if (!UseCountedLoopSafepoints && LoopStripMiningIter > 0) {
if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
warning("Disabling counted safepoints implies no loop strip mining: setting LoopStripMiningIter to 0");
}
LoopStripMiningIter = 0;
}
#endif // COMPILER2

if (Arguments::is_interpreter_only()) {

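The consistency rule being removed above keeps two C2 flags in agreement: counted-loop safepoints imply at least one strip-mining iteration, and no safepoints imply none. Modeled standalone (warning emission omitted; this is a sketch of the normalization only, not HotSpot's flag machinery):

#include <cstdio>

static void normalize(bool use_counted_loop_safepoints, long& strip_mining_iter) {
  if (use_counted_loop_safepoints && strip_mining_iter == 0) {
    strip_mining_iter = 1;        // a safepoint every iteration
  } else if (!use_counted_loop_safepoints && strip_mining_iter > 0) {
    strip_mining_iter = 0;        // no safepoints -> no strip mining
  }
}

int main() {
  long iter = 0;
  normalize(true, iter);
  std::printf("%ld\n", iter);     // prints 1
  return 0;
}
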
@@ -1609,6 +1609,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
{
GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

#ifdef ASSERT
log_develop_debug(gc, marking)(
"add_obj_count=" SIZE_FORMAT " "
"add_obj_bytes=" SIZE_FORMAT,
@@ -1619,6 +1620,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
"mark_bitmap_bytes=" SIZE_FORMAT,
mark_bitmap_count,
mark_bitmap_size * HeapWordSize);
#endif // ASSERT

// Quick summarization of each space into itself, to see how much is live.
summarize_spaces_quick();

@@ -1146,16 +1146,6 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahCompareAndExchangeP:
case Op_ShenandoahCompareAndExchangeN:
#ifdef ASSERT
if( VerifyOptoOopOffsets ) {
MemNode* mem = n->as_Mem();
// Check to see if address types have grounded out somehow.
const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
ciInstanceKlass *k = tp->klass()->as_instance_klass();
bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
assert( !tp || oop_offset_is_sane, "" );
}
#endif
return true;
case Op_ShenandoahLoadReferenceBarrier:
assert(false, "should have been expanded already");

@@ -29,6 +29,7 @@
#include "gc/shared/gcArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"

void ZArguments::initialize_alignments() {
SpaceAlignment = ZGranuleSize;

@@ -26,39 +26,21 @@

#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

template <typename T>
class ZArray {
private:
static const size_t initial_capacity = 32;

T* _array;
size_t _size;
size_t _capacity;

NONCOPYABLE(ZArray);

void expand(size_t new_capacity);

class ZArray : public GrowableArrayCHeap<T, mtGC> {
public:
ZArray();
~ZArray();

size_t size() const;
bool is_empty() const;

T at(size_t index) const;

void add(T value);
void transfer(ZArray<T>* from);
void clear();
};

template <typename T, bool parallel>
class ZArrayIteratorImpl : public StackObj {
private:
ZArray<T>* const _array;
size_t _next;
int _next;

public:
ZArrayIteratorImpl(ZArray<T>* array);
@@ -70,16 +52,7 @@ public:
#define ZARRAY_SERIAL false
#define ZARRAY_PARALLEL true

template <typename T>
class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
public:
ZArrayIterator(ZArray<T>* array);
};

template <typename T>
class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
public:
ZArrayParallelIterator(ZArray<T>* array);
};
template <typename T> using ZArrayIterator = ZArrayIteratorImpl<T, ZARRAY_SERIAL>;
template <typename T> using ZArrayParallelIterator = ZArrayIteratorImpl<T, ZARRAY_PARALLEL>;

#endif // SHARE_GC_Z_ZARRAY_HPP

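Above, the per-kind iterator subclasses collapse into C++11 alias templates, which need no forwarding constructors. A minimal standalone illustration of why the two spellings are interchangeable (the names here are illustrative, not the ZGC types):

#include <cassert>

template <typename T, bool parallel>
class IteratorImpl {
 public:
  explicit IteratorImpl(T* p) : _p(p) {}
  T* get() const { return _p; }
 private:
  T* _p;
};

// Before: class SerialIterator : public IteratorImpl<T, false> { /* ctor forwarding */ };
// After: one line each, no boilerplate constructors to maintain:
template <typename T> using SerialIterator   = IteratorImpl<T, false>;
template <typename T> using ParallelIterator = IteratorImpl<T, true>;

int main() {
  int x = 7;
  SerialIterator<int> it(&x);   // uses IteratorImpl's own constructor directly
  assert(*it.get() == 7);
  return 0;
}
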
@@ -30,67 +30,17 @@

template <typename T>
inline ZArray<T>::ZArray() :
_array(NULL),
_size(0),
_capacity(0) {}

template <typename T>
inline ZArray<T>::~ZArray() {
FREE_C_HEAP_ARRAY(T, _array);
}

template <typename T>
inline size_t ZArray<T>::size() const {
return _size;
}

template <typename T>
inline bool ZArray<T>::is_empty() const {
return size() == 0;
}

template <typename T>
inline T ZArray<T>::at(size_t index) const {
assert(index < _size, "Index out of bounds");
return _array[index];
}

template <typename T>
inline void ZArray<T>::expand(size_t new_capacity) {
T* new_array = NEW_C_HEAP_ARRAY(T, new_capacity, mtGC);
if (_array != NULL) {
memcpy(new_array, _array, sizeof(T) * _capacity);
FREE_C_HEAP_ARRAY(T, _array);
}

_array = new_array;
_capacity = new_capacity;
}

template <typename T>
inline void ZArray<T>::add(T value) {
if (_size == _capacity) {
const size_t new_capacity = (_capacity > 0) ? _capacity * 2 : initial_capacity;
expand(new_capacity);
}

_array[_size++] = value;
}
GrowableArrayCHeap<T, mtGC>(0) {}

template <typename T>
inline void ZArray<T>::transfer(ZArray<T>* from) {
assert(_array == NULL, "Should be empty");
_array = from->_array;
_size = from->_size;
_capacity = from->_capacity;
from->_array = NULL;
from->_size = 0;
from->_capacity = 0;
}

template <typename T>
inline void ZArray<T>::clear() {
_size = 0;
assert(this->_data == NULL, "Should be empty");
this->_data = from->_data;
this->_len = from->_len;
this->_max = from->_max;
from->_data = NULL;
from->_len = 0;
from->_max = 0;
}

template <typename T, bool parallel>
@@ -101,13 +51,13 @@ inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
template <typename T, bool parallel>
inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
if (parallel) {
const size_t next = Atomic::fetch_and_add(&_next, 1u);
if (next < _array->size()) {
const int next = Atomic::fetch_and_add(&_next, 1);
if (next < _array->length()) {
*elem = _array->at(next);
return true;
}
} else {
if (_next < _array->size()) {
if (_next < _array->length()) {
*elem = _array->at(_next++);
return true;
}
@@ -117,12 +67,4 @@ inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
return false;
}

template <typename T>
inline ZArrayIterator<T>::ZArrayIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}

template <typename T>
inline ZArrayParallelIterator<T>::ZArrayParallelIterator(ZArray<T>* array) :
ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}

#endif // SHARE_GC_Z_ZARRAY_INLINE_HPP

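The parallel branch of next() above claims indices with an atomic fetch-and-add, so each worker thread takes a distinct slot. A standalone model of that claim idiom, with std::atomic standing in for HotSpot's Atomic::fetch_and_add (a sketch, not the ZGC code itself):

#include <atomic>
#include <cstdio>
#include <vector>

template <bool parallel>
bool next_index(std::atomic<int>& cursor, int length, int* out) {
  if (parallel) {
    const int next = cursor.fetch_add(1);   // atomically claim one slot
    if (next < length) { *out = next; return true; }
  } else {
    const int next = cursor.load();         // single-threaded fast path
    if (next < length) { cursor.store(next + 1); *out = next; return true; }
  }
  return false;
}

int main() {
  std::vector<int> data = {10, 20, 30};
  std::atomic<int> cursor{0};
  int i;
  while (next_index<true>(cursor, (int)data.size(), &i)) {
    std::printf("%d\n", data[i]);
  }
  return 0;
}
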
@@ -30,16 +30,7 @@
#include "utilities/debug.hpp"

ZPage::ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) :
_type(type_from_size(vmem.size())),
_numa_id((uint8_t)-1),
_seqnum(0),
_virtual(vmem),
_top(start()),
_livemap(object_max_count()),
_last_used(0),
_physical(pmem) {
assert_initialized();
}
ZPage(type_from_size(vmem.size()), vmem, pmem) {}

ZPage::ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) :
_type(type),

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
@@ -40,102 +41,58 @@
#include "utilities/powerOfTwo.hpp"

ZPhysicalMemory::ZPhysicalMemory() :
_nsegments_max(0),
_nsegments(0),
_segments(NULL) {}
_segments() {}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
_nsegments_max(0),
_nsegments(0),
_segments(NULL) {
_segments() {
add_segment(segment);
}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
_nsegments_max(0),
_nsegments(0),
_segments(NULL) {
_segments() {
add_segments(pmem);
}

const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
remove_segments();
add_segments(pmem);
return *this;
}
// Free segments
_segments.clear_and_deallocate();

ZPhysicalMemory::~ZPhysicalMemory() {
remove_segments();
// Copy segments
add_segments(pmem);

return *this;
}

size_t ZPhysicalMemory::size() const {
size_t size = 0;

for (uint32_t i = 0; i < _nsegments; i++) {
size += _segments[i].size();
for (int i = 0; i < _segments.length(); i++) {
size += _segments.at(i).size();
}

return size;
}

void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
assert(index <= _nsegments, "Invalid index");

ZPhysicalMemorySegment* const from_segments = _segments;

if (_nsegments + 1 > _nsegments_max) {
// Resize array
_nsegments_max = round_up_power_of_2(_nsegments_max + 1);
_segments = new ZPhysicalMemorySegment[_nsegments_max];

// Copy segments before index
for (uint32_t i = 0; i < index; i++) {
_segments[i] = from_segments[i];
}
}

// Copy/Move segments after index
for (uint32_t i = _nsegments; i > index; i--) {
_segments[i] = from_segments[i - 1];
}

// Insert new segment
_segments[index] = ZPhysicalMemorySegment(start, size, committed);
_nsegments++;

// Delete old array
if (from_segments != _segments) {
delete [] from_segments;
}
void ZPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) {
_segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed));
}

void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
assert(index < _nsegments, "Invalid index");
_segments[index] = ZPhysicalMemorySegment(start, size, committed);;
void ZPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) {
_segments.at_put(index, ZPhysicalMemorySegment(start, size, committed));
}

void ZPhysicalMemory::remove_segment(uint32_t index) {
assert(index < _nsegments, "Invalid index");

// Move segments after index
for (uint32_t i = index + 1; i < _nsegments; i++) {
_segments[i - 1] = _segments[i];
}

_nsegments--;
void ZPhysicalMemory::remove_segment(int index) {
_segments.remove_at(index);
}

void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
for (uint32_t i = 0; i < pmem.nsegments(); i++) {
for (int i = 0; i < pmem.nsegments(); i++) {
add_segment(pmem.segment(i));
}
}

void ZPhysicalMemory::remove_segments() {
delete [] _segments;
_segments = NULL;
_nsegments_max = 0;
_nsegments = 0;
_segments.clear_and_deallocate();
}

static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
@@ -144,29 +101,29 @@ static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMem

void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
// Insert segments in address order, merge segments when possible
for (uint32_t i = _nsegments; i > 0; i--) {
const uint32_t current = i - 1;
for (int i = _segments.length(); i > 0; i--) {
const int current = i - 1;

if (_segments[current].end() <= segment.start()) {
if (is_mergable(_segments[current], segment)) {
if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
if (_segments.at(current).end() <= segment.start()) {
if (is_mergable(_segments.at(current), segment)) {
if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
// Merge with end of current segment and start of next segment
const size_t start = _segments[current].start();
const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
const size_t start = _segments.at(current).start();
const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size();
replace_segment(current, start, size, segment.is_committed());
remove_segment(current + 1);
return;
}

// Merge with end of current segment
const size_t start = _segments[current].start();
const size_t size = _segments[current].size() + segment.size();
const size_t start = _segments.at(current).start();
const size_t size = _segments.at(current).size() + segment.size();
replace_segment(current, start, size, segment.is_committed());
return;
} else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
} else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
// Merge with start of next segment
const size_t start = segment.start();
const size_t size = segment.size() + _segments[current + 1].size();
const size_t size = segment.size() + _segments.at(current + 1).size();
replace_segment(current + 1, start, size, segment.is_committed());
return;
}
@@ -177,10 +134,10 @@ void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
}
}

if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) {
// Merge with start of first segment
const size_t start = segment.start();
const size_t size = segment.size() + _segments[0].size();
const size_t size = segment.size() + _segments.at(0).size();
replace_segment(0, start, size, segment.is_committed());
return;
}
@@ -189,41 +146,43 @@ void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
insert_segment(0, segment.start(), segment.size(), segment.is_committed());
}

bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
assert(index < _nsegments, "Invalid index");
assert(size <= _segments[index].size(), "Invalid size");
assert(!_segments[index].is_committed(), "Invalid state");
bool ZPhysicalMemory::commit_segment(int index, size_t size) {
ZPhysicalMemorySegment& segment = _segments.at(index);

if (size == _segments[index].size()) {
assert(size <= segment.size(), "Invalid size");
assert(!segment.is_committed(), "Invalid state");

if (size == segment.size()) {
// Completely committed
_segments[index].set_committed(true);
segment.set_committed(true);
return true;
}

if (size > 0) {
// Partially committed, split segment
insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
replace_segment(index, _segments[index].start(), size, true /* committed */);
insert_segment(index + 1, segment.start() + size, segment.size() - size, false /* committed */);
replace_segment(index, segment.start(), size, true /* committed */);
}

return false;
}

bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
assert(index < _nsegments, "Invalid index");
assert(size <= _segments[index].size(), "Invalid size");
assert(_segments[index].is_committed(), "Invalid state");
bool ZPhysicalMemory::uncommit_segment(int index, size_t size) {
ZPhysicalMemorySegment& segment = _segments.at(index);

if (size == _segments[index].size()) {
assert(size <= segment.size(), "Invalid size");
assert(segment.is_committed(), "Invalid state");

if (size == segment.size()) {
// Completely uncommitted
_segments[index].set_committed(false);
segment.set_committed(false);
return true;
}

if (size > 0) {
// Partially uncommitted, split segment
insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
replace_segment(index, _segments[index].start(), size, false /* committed */);
insert_segment(index + 1, segment.start() + size, segment.size() - size, true /* committed */);
replace_segment(index, segment.start(), size, false /* committed */);
}

return false;
@@ -231,10 +190,10 @@ bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {

ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
ZPhysicalMemory pmem;
uint32_t nsegments = 0;
int nsegments = 0;

for (uint32_t i = 0; i < _nsegments; i++) {
const ZPhysicalMemorySegment& segment = _segments[i];
for (int i = 0; i < _segments.length(); i++) {
const ZPhysicalMemorySegment& segment = _segments.at(i);
if (pmem.size() < size) {
if (pmem.size() + segment.size() <= size) {
// Transfer segment
@@ -243,35 +202,35 @@ ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
// Split segment
const size_t split_size = size - pmem.size();
pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
_segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
_segments.at_put(nsegments++, ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed()));
}
} else {
// Keep segment
_segments[nsegments++] = segment;
_segments.at_put(nsegments++, segment);
}
}

_nsegments = nsegments;
_segments.trunc_to(nsegments);

return pmem;
}

ZPhysicalMemory ZPhysicalMemory::split_committed() {
ZPhysicalMemory pmem;
uint32_t nsegments = 0;
int nsegments = 0;

for (uint32_t i = 0; i < _nsegments; i++) {
const ZPhysicalMemorySegment& segment = _segments[i];
for (int i = 0; i < _segments.length(); i++) {
const ZPhysicalMemorySegment& segment = _segments.at(i);
if (segment.is_committed()) {
// Transfer segment
pmem.add_segment(segment);
} else {
// Keep segment
_segments[nsegments++] = segment;
_segments.at_put(nsegments++, segment);
}
}

_nsegments = nsegments;
_segments.trunc_to(nsegments);

return pmem;
}
@@ -349,7 +308,7 @@ void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {

void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
// Free segments
for (uint32_t i = 0; i < pmem.nsegments(); i++) {
for (int i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_manager.free(segment.start(), segment.size());
}
@@ -357,7 +316,7 @@ void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {

bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
// Commit segments
for (uint32_t i = 0; i < pmem.nsegments(); i++) {
for (int i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
if (segment.is_committed()) {
// Segment already committed
@@ -378,7 +337,7 @@ bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {

bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
// Commit segments
for (uint32_t i = 0; i < pmem.nsegments(); i++) {
for (int i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
if (!segment.is_committed()) {
// Segment already uncommitted
@@ -406,7 +365,7 @@ void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pme
size_t size = 0;

// Map segments
for (uint32_t i = 0; i < pmem.nsegments(); i++) {
for (int i = 0; i < pmem.nsegments(); i++) {
const ZPhysicalMemorySegment& segment = pmem.segment(i);
_backing.map(addr + size, segment.size(), segment.start());
size += segment.size();

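add_segment() above keeps the segment list address-ordered and merges with the current segment's end, the next segment's start, or both. A standalone model of that insert-with-merge logic over simple [start, end) ranges (committed-state checks omitted for brevity; this is a sketch, not the ZGC implementation):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Seg {
  std::size_t start, size;
  std::size_t end() const { return start + size; }
};

static void add_segment(std::vector<Seg>& segs, Seg s) {
  for (std::size_t i = segs.size(); i > 0; i--) {
    Seg& cur = segs[i - 1];
    if (cur.end() == s.start) {                       // merge with end of current
      cur.size += s.size;
      if (i < segs.size() && cur.end() == segs[i].start) {
        cur.size += segs[i].size;                     // ...and with start of next
        segs.erase(segs.begin() + i);
      }
      return;
    }
    if (cur.end() < s.start) {                        // insert after current
      segs.insert(segs.begin() + i, s);
      return;
    }
  }
  if (!segs.empty() && s.end() == segs[0].start) {    // merge with first segment
    segs[0] = Seg{s.start, s.size + segs[0].size};
    return;
  }
  segs.insert(segs.begin(), s);                       // insert first
}

int main() {
  std::vector<Seg> segs;
  add_segment(segs, {0, 4});
  add_segment(segs, {8, 4});
  add_segment(segs, {4, 4});                          // bridges both -> one segment
  for (const Seg& s : segs) std::printf("[%zu, %zu)\n", s.start, s.end());
  return 0;                                           // prints "[0, 12)"
}
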
@@ -24,6 +24,7 @@
#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_HPP
#define SHARE_GC_Z_ZPHYSICALMEMORY_HPP

#include "gc/z/zArray.hpp"
#include "gc/z/zMemory.hpp"
#include "memory/allocation.hpp"
#include OS_HEADER(gc/z/zPhysicalMemoryBacking)
@@ -48,33 +49,30 @@ public:

class ZPhysicalMemory {
private:
uint32_t _nsegments_max;
uint32_t _nsegments;
ZPhysicalMemorySegment* _segments;
ZArray<ZPhysicalMemorySegment> _segments;

void insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
void replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed);
void remove_segment(uint32_t index);
void insert_segment(int index, uintptr_t start, size_t size, bool committed);
void replace_segment(int index, uintptr_t start, size_t size, bool committed);
void remove_segment(int index);

public:
ZPhysicalMemory();
ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
ZPhysicalMemory(const ZPhysicalMemory& pmem);
const ZPhysicalMemory& operator=(const ZPhysicalMemory& pmem);
~ZPhysicalMemory();

bool is_null() const;
size_t size() const;

uint32_t nsegments() const;
const ZPhysicalMemorySegment& segment(uint32_t index) const;
int nsegments() const;
const ZPhysicalMemorySegment& segment(int index) const;

void add_segments(const ZPhysicalMemory& pmem);
void remove_segments();

void add_segment(const ZPhysicalMemorySegment& segment);
bool commit_segment(uint32_t index, size_t size);
bool uncommit_segment(uint32_t index, size_t size);
bool commit_segment(int index, size_t size);
bool uncommit_segment(int index, size_t size);

ZPhysicalMemory split(size_t size);
ZPhysicalMemory split_committed();

@@ -58,16 +58,15 @@ inline void ZPhysicalMemorySegment::set_committed(bool committed) {
}

inline bool ZPhysicalMemory::is_null() const {
return _nsegments == 0;
return _segments.length() == 0;
}

inline uint32_t ZPhysicalMemory::nsegments() const {
return _nsegments;
inline int ZPhysicalMemory::nsegments() const {
return _segments.length();
}

inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(uint32_t index) const {
assert(index < _nsegments, "Invalid segment index");
return _segments[index];
inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(int index) const {
return _segments.at(index);
}

#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP

@@ -66,7 +66,7 @@ void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
const size_t garbage = size - live;

if (garbage > _fragmentation_limit) {
_registered_pages.add(page);
_registered_pages.append(page);
}

_stats._npages++;
@@ -100,7 +100,7 @@ void ZRelocationSetSelectorGroup::semi_sort() {
const size_t npartitions = (size_t)1 << npartitions_shift;
const size_t partition_size = _page_size >> npartitions_shift;
const size_t partition_size_shift = exact_log2(partition_size);
const size_t npages = _registered_pages.size();
const size_t npages = _registered_pages.length();

// Partition slots/fingers
size_t partitions[npartitions];
@@ -140,7 +140,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
// Calculate the number of pages to relocate by successively including pages in
// a candidate relocation set and calculate the maximum space requirement for
// their live objects.
const size_t npages = _registered_pages.size();
const size_t npages = _registered_pages.length();
size_t selected_from = 0;
size_t selected_to = 0;
size_t from_size = 0;

@@ -26,6 +26,7 @@
#include "gc/shared/workgroup.hpp"
#include "gc/z/zRuntimeWorkers.hpp"
#include "gc/z/zThread.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"

class ZRuntimeWorkersInitializeTask : public AbstractGangTask {

@@ -39,7 +39,7 @@ template <typename T>
bool ZSafeDeleteImpl<T>::deferred_delete(ItemT* item) {
ZLocker<ZLock> locker(_lock);
if (_enabled > 0) {
_deferred.add(item);
_deferred.append(item);
return true;
}

@@ -96,33 +96,10 @@ public:
void set_all(const T& value);
};

template <typename T>
class ZContended : public ZValue<ZContendedStorage, T> {
public:
ZContended();
ZContended(const T& value);
};

template <typename T>
class ZPerCPU : public ZValue<ZPerCPUStorage, T> {
public:
ZPerCPU();
ZPerCPU(const T& value);
};

template <typename T>
class ZPerNUMA : public ZValue<ZPerNUMAStorage, T> {
public:
ZPerNUMA();
ZPerNUMA(const T& value);
};

template <typename T>
class ZPerWorker : public ZValue<ZPerWorkerStorage, T> {
public:
ZPerWorker();
ZPerWorker(const T& value);
};
template <typename T> using ZContended = ZValue<ZContendedStorage, T>;
template <typename T> using ZPerCPU = ZValue<ZPerCPUStorage, T>;
template <typename T> using ZPerNUMA = ZValue<ZPerNUMAStorage, T>;
template <typename T> using ZPerWorker = ZValue<ZPerWorkerStorage, T>;

//
// Iterator
@@ -140,23 +117,9 @@ public:
bool next(T** value);
};

template <typename T>
class ZPerCPUIterator : public ZValueIterator<ZPerCPUStorage, T> {
public:
ZPerCPUIterator(ZPerCPU<T>* value);
};

template <typename T>
class ZPerNUMAIterator : public ZValueIterator<ZPerNUMAStorage, T> {
public:
ZPerNUMAIterator(ZPerNUMA<T>* value);
};

template <typename T>
class ZPerWorkerIterator : public ZValueIterator<ZPerWorkerStorage, T> {
public:
ZPerWorkerIterator(ZPerWorker<T>* value);
};
template <typename T> using ZPerCPUIterator = ZValueIterator<ZPerCPUStorage, T>;
template <typename T> using ZPerNUMAIterator = ZValueIterator<ZPerNUMAStorage, T>;
template <typename T> using ZPerWorkerIterator = ZValueIterator<ZPerWorkerStorage, T>;

template <typename S, typename T>
class ZValueConstIterator {
@@ -170,22 +133,8 @@ public:
bool next(const T** value);
};

template <typename T>
class ZPerCPUConstIterator : public ZValueConstIterator<ZPerCPUStorage, T> {
public:
ZPerCPUConstIterator(const ZPerCPU<T>* value);
};

template <typename T>
class ZPerNUMAConstIterator : public ZValueConstIterator<ZPerNUMAStorage, T> {
public:
ZPerNUMAConstIterator(const ZPerNUMA<T>* value);
};

template <typename T>
class ZPerWorkerConstIterator : public ZValueConstIterator<ZPerWorkerStorage, T> {
public:
ZPerWorkerConstIterator(const ZPerWorker<T>* value);
};
template <typename T> using ZPerCPUConstIterator = ZValueConstIterator<ZPerCPUStorage, T>;
template <typename T> using ZPerNUMAConstIterator = ZValueConstIterator<ZPerNUMAStorage, T>;
template <typename T> using ZPerWorkerConstIterator = ZValueConstIterator<ZPerWorkerStorage, T>;

#endif // SHARE_GC_Z_ZVALUE_HPP

@@ -173,38 +173,6 @@ inline void ZValue<S, T>::set_all(const T& value) {
}
}

template <typename T>
inline ZContended<T>::ZContended() :
ZValue<ZContendedStorage, T>() {}

template <typename T>
inline ZContended<T>::ZContended(const T& value) :
ZValue<ZContendedStorage, T>(value) {}

template <typename T>
inline ZPerCPU<T>::ZPerCPU() :
ZValue<ZPerCPUStorage, T>() {}

template <typename T>
inline ZPerCPU<T>::ZPerCPU(const T& value) :
ZValue<ZPerCPUStorage, T>(value) {}

template <typename T>
inline ZPerNUMA<T>::ZPerNUMA() :
ZValue<ZPerNUMAStorage, T>() {}

template <typename T>
inline ZPerNUMA<T>::ZPerNUMA(const T& value) :
ZValue<ZPerNUMAStorage, T>(value) {}

template <typename T>
inline ZPerWorker<T>::ZPerWorker() :
ZValue<ZPerWorkerStorage, T>() {}

template <typename T>
inline ZPerWorker<T>::ZPerWorker(const T& value) :
ZValue<ZPerWorkerStorage, T>(value) {}

//
// Iterator
//
@@ -223,18 +191,6 @@ inline bool ZValueIterator<S, T>::next(T** value) {
return false;
}

template <typename T>
inline ZPerCPUIterator<T>::ZPerCPUIterator(ZPerCPU<T>* value) :
ZValueIterator<ZPerCPUStorage, T>(value) {}

template <typename T>
inline ZPerNUMAIterator<T>::ZPerNUMAIterator(ZPerNUMA<T>* value) :
ZValueIterator<ZPerNUMAStorage, T>(value) {}

template <typename T>
inline ZPerWorkerIterator<T>::ZPerWorkerIterator(ZPerWorker<T>* value) :
ZValueIterator<ZPerWorkerStorage, T>(value) {}

template <typename S, typename T>
inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValue<S, T>* value) :
_value(value),
@@ -249,16 +205,5 @@ inline bool ZValueConstIterator<S, T>::next(const T** value) {
return false;
}

template <typename T>
inline ZPerCPUConstIterator<T>::ZPerCPUConstIterator(const ZPerCPU<T>* value) :
ZValueConstIterator<ZPerCPUStorage, T>(value) {}

template <typename T>
inline ZPerNUMAConstIterator<T>::ZPerNUMAConstIterator(const ZPerNUMA<T>* value) :
ZValueConstIterator<ZPerNUMAStorage, T>(value) {}

template <typename T>
inline ZPerWorkerConstIterator<T>::ZPerWorkerConstIterator(const ZPerWorker<T>* value) :
ZValueConstIterator<ZPerWorkerStorage, T>(value) {}

#endif // SHARE_GC_Z_ZVALUE_INLINE_HPP

@@ -27,6 +27,7 @@
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -265,7 +265,9 @@ const char* JfrOSInterface::virtualization_name() {
|
||||
} else if (vrt == VMWare) {
|
||||
return "VMWare virtualization";
|
||||
} else if (vrt == HyperV) {
|
||||
return "HyperV virtualization";
|
||||
return "Hyper-V virtualization";
|
||||
} else if (vrt == HyperVRole) {
|
||||
return "Hyper-V role";
|
||||
} else if (vrt == PowerVM) {
|
||||
return "PowerVM virtualization";
|
||||
} else if (vrt == PowerKVM) {
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "jfr/recorder/service/jfrRecorderService.hpp"
|
||||
#include "jfr/utilities/jfrTypes.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,6 +30,7 @@
|
||||
#include "jvmci/metadataHandles.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
|
||||
JVMCIRuntime* JVMCI::_compiler_runtime = NULL;
|
||||
JVMCIRuntime* JVMCI::_java_runtime = NULL;
|
||||
|
@@ -1043,7 +1043,7 @@ GrowableArray<ScopeValue*>* CodeInstaller::record_virtual_objects(JVMCIObject de
  return objects;
}

void CodeInstaller::record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMode scope_mode, bool return_oop, JVMCI_TRAPS) {
void CodeInstaller::record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMode scope_mode, bool is_mh_invoke, bool return_oop, JVMCI_TRAPS) {
  JVMCIObject position = jvmci_env()->get_DebugInfo_bytecodePosition(debug_info);
  if (position.is_null()) {
    // Stubs do not record scope info, just oop maps
@@ -1056,7 +1056,7 @@ void CodeInstaller::record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMo
  } else {
    objectMapping = NULL;
  }
  record_scope(pc_offset, position, scope_mode, objectMapping, return_oop, JVMCI_CHECK);
  record_scope(pc_offset, position, scope_mode, objectMapping, is_mh_invoke, return_oop, JVMCI_CHECK);
}

int CodeInstaller::map_jvmci_bci(int bci) {
@@ -1079,7 +1079,7 @@ int CodeInstaller::map_jvmci_bci(int bci) {
  return bci;
}

void CodeInstaller::record_scope(jint pc_offset, JVMCIObject position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, bool return_oop, JVMCI_TRAPS) {
void CodeInstaller::record_scope(jint pc_offset, JVMCIObject position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, bool is_mh_invoke, bool return_oop, JVMCI_TRAPS) {
  JVMCIObject frame;
  if (scope_mode == CodeInstaller::FullFrame) {
    if (!jvmci_env()->isa_BytecodeFrame(position)) {
@@ -1089,7 +1089,7 @@ void CodeInstaller::record_scope(jint pc_offset, JVMCIObject position, ScopeMode
  }
  JVMCIObject caller_frame = jvmci_env()->get_BytecodePosition_caller(position);
  if (caller_frame.is_non_null()) {
    record_scope(pc_offset, caller_frame, scope_mode, objects, return_oop, JVMCI_CHECK);
    record_scope(pc_offset, caller_frame, scope_mode, objects, is_mh_invoke, return_oop, JVMCI_CHECK);
  }

  JVMCIObject hotspot_method = jvmci_env()->get_BytecodePosition_method(position);
@@ -1181,7 +1181,7 @@ void CodeInstaller::record_scope(jint pc_offset, JVMCIObject position, ScopeMode
    throw_exception = jvmci_env()->get_BytecodeFrame_rethrowException(frame) == JNI_TRUE;
  }

  _debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, false, return_oop,
  _debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, is_mh_invoke, return_oop,
                                  locals_token, expressions_token, monitors_token);
}

@@ -1236,9 +1236,19 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
  OopMap *map = create_oop_map(debug_info, JVMCI_CHECK);
  _debug_recorder->add_safepoint(next_pc_offset, map);

  bool return_oop = hotspot_method.is_non_null() && jvmci_env()->asMethod(hotspot_method)->is_returning_oop();

  record_scope(next_pc_offset, debug_info, CodeInstaller::FullFrame, return_oop, JVMCI_CHECK);
  if (hotspot_method.is_non_null()) {
    Method *method = jvmci_env()->asMethod(hotspot_method);
    vmIntrinsics::ID iid = method->intrinsic_id();
    bool is_mh_invoke = false;
    if (jvmci_env()->get_site_Call_direct(site)) {
      is_mh_invoke = !method->is_static() && (iid == vmIntrinsics::_compiledLambdaForm ||
              (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::is_signature_polymorphic_intrinsic(iid)));
    }
    bool return_oop = method->is_returning_oop();
    record_scope(next_pc_offset, debug_info, CodeInstaller::FullFrame, is_mh_invoke, return_oop, JVMCI_CHECK);
  } else {
    record_scope(next_pc_offset, debug_info, CodeInstaller::FullFrame, JVMCI_CHECK);
  }
}

if (foreign_call.is_non_null()) {
@@ -1347,6 +1357,9 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
    case DEOPT_HANDLER_ENTRY:
      _offsets.set_value(CodeOffsets::Deopt, pc_offset);
      break;
    case DEOPT_MH_HANDLER_ENTRY:
      _offsets.set_value(CodeOffsets::DeoptMH, pc_offset);
      break;
    case FRAME_COMPLETE:
      _offsets.set_value(CodeOffsets::Frame_Complete, pc_offset);
      break;
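The new site_Call branch above decides whether a call site needs the method-handle deoptimization handler. A minimal standalone sketch of that classification, not part of the commit, with hypothetical stand-ins for HotSpot's vmIntrinsics IDs and MethodHandles predicates:

#include <cassert>

// Hypothetical stand-ins; only the shape of the check mirrors the diff above.
enum class IntrinsicId { none, compiledLambdaForm, invokeBasic, linkToStatic };

static bool is_signature_polymorphic(IntrinsicId iid) {
  return iid == IntrinsicId::invokeBasic || iid == IntrinsicId::linkToStatic;
}

static bool is_signature_polymorphic_intrinsic(IntrinsicId iid) {
  // linkTo* calls are signature-polymorphic but not "intrinsic" in this sense.
  return iid == IntrinsicId::invokeBasic;
}

// Mirrors the condition: a direct call to a non-static method that is either a
// compiled lambda form or a signature-polymorphic intrinsic is an MH invoke.
static bool classify_is_mh_invoke(bool direct_call, bool is_static, IntrinsicId iid) {
  if (!direct_call) return false;
  return !is_static && (iid == IntrinsicId::compiledLambdaForm ||
                        (is_signature_polymorphic(iid) &&
                         is_signature_polymorphic_intrinsic(iid)));
}

int main() {
  assert(classify_is_mh_invoke(true, false, IntrinsicId::compiledLambdaForm));
  assert(!classify_is_mh_invoke(false, false, IntrinsicId::compiledLambdaForm));
  assert(!classify_is_mh_invoke(true, true, IntrinsicId::invokeBasic));
  return 0;
}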
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -159,6 +159,7 @@ private:
    CRC_TABLE_ADDRESS,
    LOG_OF_HEAP_REGION_GRAIN_BYTES,
    INLINE_CONTIGUOUS_ALLOCATION_SUPPORTED,
    DEOPT_MH_HANDLER_ENTRY,
    INVOKE_INVALID = -1
  };

@@ -297,11 +298,11 @@ protected:

  int map_jvmci_bci(int bci);

  void record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMode scope_mode, bool return_oop, JVMCI_TRAPS);
  void record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMode scope_mode, bool is_mh_invoke, bool return_oop, JVMCI_TRAPS);
  void record_scope(jint pc_offset, JVMCIObject debug_info, ScopeMode scope_mode, JVMCI_TRAPS) {
    record_scope(pc_offset, debug_info, scope_mode, false /* return_oop */, JVMCIENV);
    record_scope(pc_offset, debug_info, scope_mode, false /* is_mh_invoke */, false /* return_oop */, JVMCIENV);
  }
  void record_scope(jint pc_offset, JVMCIObject position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, bool return_oop, JVMCI_TRAPS);
  void record_scope(jint pc_offset, JVMCIObject position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, bool is_mh_invoke, bool return_oop, JVMCI_TRAPS);
  void record_object_value(ObjectValue* sv, JVMCIObject value, GrowableArray<ScopeValue*>* objects, JVMCI_TRAPS);

  GrowableArray<ScopeValue*>* record_virtual_objects(JVMCIObject debug_info, JVMCI_TRAPS);
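The header change above widens record_scope while keeping old callers compiling, by forwarding a conservative default through the short overload. The same pattern in isolation, as a sketch rather than the actual JVMCI code:

#include <cstdio>

// Widened overload carries the new flag...
static void record_scope(int pc_offset, bool is_mh_invoke, bool return_oop) {
  std::printf("pc=%d mh=%d oop=%d\n", pc_offset, is_mh_invoke, return_oop);
}

// ...while the old-arity entry point forwards conservative defaults, so
// existing call sites compile unchanged.
static void record_scope(int pc_offset) {
  record_scope(pc_offset, /*is_mh_invoke*/ false, /*return_oop*/ false);
}

int main() {
  record_scope(42);
  return 0;
}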
@@ -27,6 +27,7 @@
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"

JVMCICompiler* JVMCICompiler::_instance = NULL;
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -175,6 +175,7 @@
  end_class \
  start_class(site_Call, jdk_vm_ci_code_site_Call) \
    object_field(site_Call, target, "Ljdk/vm/ci/meta/InvokeTarget;") \
    boolean_field(site_Call, direct) \
  end_class \
  start_class(site_DataPatch, jdk_vm_ci_code_site_DataPatch) \
    object_field(site_DataPatch, reference, "Ljdk/vm/ci/code/site/Reference;") \
@@ -28,6 +28,7 @@
#include "gc/shared/gcConfig.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/ostream.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"

fileStream* JVMCIGlobals::_jni_config_file = NULL;
@@ -471,8 +471,14 @@
  declare_constant(CodeInstaller::CRC_TABLE_ADDRESS) \
  declare_constant(CodeInstaller::LOG_OF_HEAP_REGION_GRAIN_BYTES) \
  declare_constant(CodeInstaller::INLINE_CONTIGUOUS_ALLOCATION_SUPPORTED) \
  declare_constant(CodeInstaller::DEOPT_MH_HANDLER_ENTRY) \
  declare_constant(CodeInstaller::INVOKE_INVALID) \
  \
  declare_constant(vmIntrinsics::FIRST_MH_SIG_POLY) \
  declare_constant(vmIntrinsics::LAST_MH_SIG_POLY) \
  declare_constant(vmIntrinsics::_invokeGeneric) \
  declare_constant(vmIntrinsics::_compiledLambdaForm) \
  \
  declare_constant(CollectedHeap::Serial) \
  declare_constant(CollectedHeap::Parallel) \
  declare_constant(CollectedHeap::G1) \
@@ -966,4 +972,3 @@ void jvmci_vmStructs_init() {
  JVMCIVMStructs::init();
}
#endif // ASSERT

@@ -157,6 +157,10 @@ ArchiveBuilder::~ArchiveBuilder() {

  clean_up_src_obj_table();

  for (int i = 0; i < _symbols->length(); i++) {
    _symbols->at(i)->decrement_refcount();
  }

  delete _klasses;
  delete _symbols;
  delete _special_refs;
@@ -197,7 +201,10 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
    }
    _estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
  } else if (ref->msotype() == MetaspaceObj::SymbolType) {
    _symbols->append((Symbol*)ref->obj());
    // Make sure the symbol won't be GC'ed while we are dumping the archive.
    Symbol* sym = (Symbol*)ref->obj();
    sym->increment_refcount();
    _symbols->append(sym);
  }

  int bytes = ref->size() * BytesPerWord;
@@ -274,9 +281,13 @@ void ArchiveBuilder::sort_klasses() {
void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
  int i;

  int num_symbols = _symbols->length();
  for (i = 0; i < num_symbols; i++) {
    it->push(&_symbols->at(i));
  if (!is_relocating_pointers) {
    // Don't relocate _symbol, so we can safely call decrement_refcount on the
    // original symbols.
    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(&_symbols->at(i));
    }
  }

  int num_klasses = _klasses->length();
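The pairing above pins each Symbol for the lifetime of the ArchiveBuilder: increment when gathered, decrement in the destructor. A self-contained sketch of that pattern, not part of the commit, with Symbol as a hypothetical refcounted stand-in:

#include <cassert>
#include <vector>

struct Symbol {                  // hypothetical stand-in for HotSpot's Symbol
  int refcount = 1;
  void increment_refcount() { ++refcount; }
  void decrement_refcount() { --refcount; }
};

class Builder {
  std::vector<Symbol*> _symbols;
public:
  void gather(Symbol* sym) {
    sym->increment_refcount();   // keep the symbol alive while dumping
    _symbols.push_back(sym);
  }
  ~Builder() {
    for (Symbol* sym : _symbols) {
      sym->decrement_refcount(); // release the pin taken in gather()
    }
  }
};

int main() {
  Symbol s;
  {
    Builder b;
    b.gather(&s);
    assert(s.refcount == 2);     // pinned for the duration of the dump
  }
  assert(s.refcount == 1);       // pin released with the builder
  return 0;
}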
@@ -25,7 +25,9 @@
#ifndef SHARE_MEMORY_ARCHIVEBUILDER_HPP
#define SHARE_MEMORY_ARCHIVEBUILDER_HPP

#include "memory/archiveUtils.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/klass.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashtable.hpp"
@@ -33,7 +35,6 @@

class CHeapBitMap;
class Klass;
class DumpRegion;
class Symbol;
class DumpAllocStats;

@@ -229,6 +230,10 @@ public:
    return klass;
  }

  static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
    return (Symbol*)singleton()->get_dumped_addr((address)orig_symbol);
  }

  void print_stats(int ro_all, int rw_all, int mc_all);
};

@@ -24,11 +24,14 @@

#include "precompiled.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/compressedOops.inline.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_CDS

CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
address* ArchivePtrMarker::_ptr_base;
address* ArchivePtrMarker::_ptr_end;
@@ -133,4 +136,160 @@ void ArchivePtrMarker::compact(size_t max_non_null_offset) {
  _compacted = true;
}

#endif // INCLUDE_CDS
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
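DumpRegion::allocate above is a bump-pointer allocator: align the current top, reserve an aligned size, zero the fresh bytes. A minimal sketch of the same arithmetic over a plain buffer, not part of the commit; the HotSpot version additionally commits memory and range-checks against _end:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

static char* align_up(char* p, size_t alignment) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<char*>((v + alignment - 1) & ~(alignment - 1));
}

static size_t align_up(size_t n, size_t alignment) {
  return (n + alignment - 1) & ~(alignment - 1);
}

struct MiniDumpRegion {          // simplified model of DumpRegion
  char* _base; char* _top; char* _end;

  char* allocate(size_t num_bytes, size_t alignment = sizeof(void*)) {
    char* p = align_up(_top, alignment);
    char* newtop = p + align_up(num_bytes, alignment);
    assert(newtop <= _end && "out of space");
    _top = newtop;
    memset(p, 0, newtop - p);    // zero, as the real allocate() does
    return p;
  }
};

int main() {
  alignas(16) static char buf[256];
  MiniDumpRegion r{buf, buf, buf + sizeof(buf)};
  char* a = r.allocate(3);       // size rounded up to pointer granularity
  char* b = r.allocate(8, 16);   // explicitly 16-byte aligned
  assert(reinterpret_cast<uintptr_t>(b) % 16 == 0);
  assert(b >= a + sizeof(void*));
  return 0;
}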
@@ -26,9 +26,13 @@
#define SHARE_MEMORY_ARCHIVEUTILS_HPP

#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.hpp"

class ReservedSpace;
class VirtualSpace;

// ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an
// InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling
// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_prt_base <= ptr_loc < _ptr_end). _ptr_base is
@@ -139,5 +143,100 @@ class SharedDataRelocator: public BitMapClosure {
  inline bool do_bit(size_t offset);
};

class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;
  ReservedSpace* _rs;
  VirtualSpace* _vs;

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* expand_top_to(char* newtop);
  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord);

  void append_intptr_t(intptr_t n, bool need_to_mark = false);

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const;
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

  void init(ReservedSpace* rs, VirtualSpace* vs);

  void pack(DumpRegion* next = NULL);

  bool contains(char* p) {
    return base() <= p && p < top();
  }
};

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p, true);
  }

  void do_u4(u4* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_bool(bool *p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o);
  void do_region(u_char* start, size_t size);
  bool reading() const { return false; }
};

// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p);
  void do_u4(u4* p);
  void do_bool(bool *p);
  void do_tag(int tag);
  void do_oop(oop *p);
  void do_region(u_char* start, size_t size);
  bool reading() const { return true; }
};

#endif // SHARE_MEMORY_ARCHIVEUTILS_HPP
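WriteClosure and ReadClosure above are the two halves of one tagged word-stream format: every value is widened to an intptr_t, and do_tag entries let the reader verify it is still in sync. A self-contained sketch of that round-trip protocol, not part of the commit, backed by a std::vector instead of a DumpRegion:

#include <cassert>
#include <cstdint>
#include <vector>

// Writer: serialize everything as intptr_t words, tags included.
class MiniWriteClosure {
  std::vector<intptr_t>& _out;
public:
  explicit MiniWriteClosure(std::vector<intptr_t>& out) : _out(out) {}
  void do_u4(uint32_t v) { _out.push_back((intptr_t)v); }
  void do_tag(int tag)   { _out.push_back((intptr_t)tag); }
};

// Reader: consume words in the same order, checking that tags match.
class MiniReadClosure {
  const intptr_t* _p;
public:
  explicit MiniReadClosure(const intptr_t* p) : _p(p) {}
  intptr_t nextPtr() { return *_p++; }
  void do_u4(uint32_t* v) { *v = (uint32_t)nextPtr(); }
  void do_tag(int tag) {
    int old_tag = (int)nextPtr();
    assert(tag == old_tag && "serialization stream out of sync");
    (void)old_tag;
  }
};

int main() {
  std::vector<intptr_t> data;
  MiniWriteClosure w(data);
  w.do_tag(42);
  w.do_u4(123456u);
  w.do_tag(43);

  MiniReadClosure r(data.data());
  r.do_tag(42);                  // same order on the way back in
  uint32_t v = 0;
  r.do_u4(&v);
  r.do_tag(43);
  assert(v == 123456u);
  return 0;
}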
@@ -93,7 +93,6 @@ class Chunk: CHeapObj<mtChunk> {
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;
@@ -238,7 +238,7 @@ public:
    // they would need to call DynamicArchive::original_to_target() to
    // get the correct addresses.
    assert(current_dump_space() == ro_space, "Must be RO space");
    SymbolTable::write_to_archive(false);
    SymbolTable::write_to_archive(symbols());
    SystemDictionaryShared::write_to_archive(false);

    serialized_data = ro_space->top();
@@ -270,7 +270,6 @@ public:
  virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
    if (!is_relocating_pointers) {
      SystemDictionaryShared::dumptime_classes_do(it);
      SymbolTable::metaspace_pointers_do(it);
    }
    FileMapInfo::metaspace_pointers_do(it);
  }
@@ -56,9 +56,9 @@
bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;

//
// If you add new entries to the following tables, you should know what you're doing!
@@ -233,7 +233,6 @@ void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
  create_archived_object_cache();

  log_info(cds)("Dumping objects to closed archive heap region ...");
  NOT_PRODUCT(StringTable::verify());
  copy_closed_archive_heap_objects(closed);

  log_info(cds)("Dumping objects to open archive heap region ...");
@@ -253,7 +252,7 @@ void HeapShared::copy_closed_archive_heap_objects(
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
@@ -962,6 +961,11 @@ void HeapShared::init_subgraph_entry_fields(Thread* THREAD) {
                             THREAD);
}

void HeapShared::init_for_dumping(Thread* THREAD) {
  _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
  init_subgraph_entry_fields(THREAD);
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          Thread* THREAD) {
@@ -1015,6 +1019,17 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be only referenced by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
//   [1] used by classes that WILL be archived;
//   [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
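add_to_dumped_interned_strings above builds an opt-in set: only explicitly marked strings get written, and put_if_absent makes duplicate marking harmless. A sketch of the same idea, not part of the commit, with std::unordered_set standing in for DumpedInternedStrings:

#include <cassert>
#include <string>
#include <unordered_set>

// Stand-in for the dump-time set of interned strings; in HotSpot the keys are
// raw oops, which is why the real code asserts it runs at a safepoint.
static std::unordered_set<std::string> dumped_interned_strings;

static void add_to_dumped_interned_strings(const std::string& s) {
  dumped_interned_strings.insert(s);   // put_if_absent: re-adding is a no-op
}

int main() {
  add_to_dumped_interned_strings("java/lang/Object");
  add_to_dumped_interned_strings("java/lang/Object"); // duplicate: ignored
  assert(dumped_interned_strings.size() == 1);
  return 0;
}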
@@ -26,6 +26,7 @@
#define SHARE_MEMORY_HEAPSHARED_HPP

#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceShared.hpp"
@@ -38,6 +39,8 @@
#include "utilities/resourceHash.hpp"

#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings;

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
@@ -123,12 +126,18 @@ class HeapShared: AllStatic {
  static bool _closed_archive_heap_region_mapped;
  static bool _open_archive_heap_region_mapped;
  static bool _archive_heap_region_fixed;
  static DumpedInternedStrings *_dumped_interned_strings;

public:
  static bool oop_equals(oop const& p1, oop const& p2) {
    return p1 == p2;
  }
  static unsigned oop_hash(oop const& p);
  static unsigned string_oop_hash(oop const& string) {
    return java_lang_String::hash_code(string);
  }

private:
  typedef ResourceHashtable<oop, oop,
      HeapShared::oop_hash,
      HeapShared::oop_equals,
@@ -274,6 +283,7 @@ private:
                                           TRAPS);

  static ResourceBitMap calculate_oopmap(MemRegion region);
  static void add_to_dumped_interned_strings(oop string);
#endif // INCLUDE_CDS_JAVA_HEAP

public:
@@ -324,8 +334,20 @@ private:
  static void patch_archived_heap_embedded_pointers(MemRegion mem, address oopmap,
                                                    size_t oopmap_in_bits) NOT_CDS_JAVA_HEAP_RETURN;

  static void init_for_dumping(Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_subgraph_entry_fields(Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN;
  static void write_subgraph_info_table() NOT_CDS_JAVA_HEAP_RETURN;
  static void serialize_subgraph_info_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
};

#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings :
  public ResourceHashtable<oop, bool,
                           HeapShared::string_oop_hash,
                           HeapShared::oop_equals,
                           15889, // prime number
                           ResourceObj::C_HEAP>
{};
#endif

#endif // SHARE_MEMORY_HEAPSHARED_HPP
@@ -40,6 +40,7 @@
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memTracker.hpp"
@@ -56,6 +56,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.hpp"
#include "oops/typeArrayKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
@@ -114,92 +115,6 @@ bool MetaspaceShared::_use_optimized_module_handling = true;
// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 4 regions.

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}

static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

@@ -452,10 +367,12 @@ void MetaspaceShared::post_initialize(TRAPS) {
  }
}

static GrowableArrayCHeap<Handle, mtClassShared>* _extra_interned_strings = NULL;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _extra_interned_strings = NULL;
static GrowableArrayCHeap<Symbol*, mtClassShared>* _extra_symbols = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new GrowableArrayCHeap<Handle, mtClassShared>(10000);
  _extra_interned_strings = new GrowableArrayCHeap<OopHandle, mtClassShared>(10000);
  _extra_symbols = new GrowableArrayCHeap<Symbol*, mtClassShared>(1000);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");
@@ -474,10 +391,10 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
      _extra_symbols->append(SymbolTable::new_permanent_symbol(utf8_buffer));
    } else{
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);
      oop str = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
@@ -486,7 +403,7 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          typeArrayOop body = java_lang_String::value(str);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
@@ -497,11 +414,9 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to it, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
        // Make sure this string is included in the dumped interned string table.
        assert(str != NULL, "must succeed");
        _extra_interned_strings->append(OopHandle(Universe::vm_global(), str));
      }
    }
  }
@@ -983,30 +898,6 @@ bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
@@ -1019,7 +910,10 @@ private:
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  void dump_shared_symbol_table(GrowableArray<Symbol*>* symbols) {
    log_info(cds)("Dumping symbol table ...");
    SymbolTable::write_to_archive(symbols);
  }
  char* dump_read_only_tables();
  void print_region_stats(FileMapInfo* map_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
@@ -1043,18 +937,21 @@ public:
    FileMapInfo::metaspace_pointers_do(it, false);
    SystemDictionaryShared::dumptime_classes_do(it);
    Universe::metaspace_pointers_do(it);
    SymbolTable::metaspace_pointers_do(it);
    vmSymbols::metaspace_pointers_do(it);

    // The above code should find all the symbols that are referenced by the
    // archived classes. We just need to add the extra symbols which
    // may not be used by any of the archived classes -- these are usually
    // symbols that we anticipate to be used at run time, so we can store
    // them in the RO region, to be shared across multiple processes.
    if (_extra_symbols != NULL) {
      for (int i = 0; i < _extra_symbols->length(); i++) {
        it->push(_extra_symbols->adr_at(i));
      }
    }
  }
};

void VM_PopulateDumpSharedSpace::dump_symbols() {
  log_info(cds)("Dumping symbol table ...");

  NOT_PRODUCT(SymbolTable::verify());
  SymbolTable::write_to_archive();
}

char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  ArchiveBuilder::OtherROAllocMark mark;

@@ -1179,7 +1076,7 @@ void VM_PopulateDumpSharedSpace::doit() {
  builder.dump_ro_region();
  builder.relocate_pointers();

  dump_symbols();
  dump_shared_symbol_table(builder.symbols());

  // Dump supported java heap objects
  _closed_archive_heap_regions = NULL;
@@ -1430,7 +1327,7 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
  }
  log_info(cds)("Reading extra data: done.");

  HeapShared::init_subgraph_entry_fields(THREAD);
  HeapShared::init_for_dumping(THREAD);

  // Rewrite and link classes
  log_info(cds)("Rewriting and linking classes ...");
@@ -1522,6 +1419,22 @@ bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {

#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
  // Find all the interned strings that should be dumped.
  int i;
  for (i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->add_dumped_interned_strings();
    }
  }
  if (_extra_interned_strings != NULL) {
    for (i = 0; i < _extra_interned_strings->length(); i ++) {
      OopHandle string = _extra_interned_strings->at(i);
      HeapShared::add_to_dumped_interned_strings(string.resolve());
    }
  }

  // The closed and open archive heap space has maximum two regions.
  // See FileMapInfo::write_archive_heap_regions() for details.
  _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
@@ -1564,56 +1477,6 @@ void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegi
}
#endif // INCLUDE_CDS_JAVA_HEAP

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
  assert(base <= static_top && static_top <= top, "must be");
  _shared_metaspace_static_top = static_top;
@@ -2206,8 +2069,3 @@ void MetaspaceShared::print_on(outputStream* st) {
  }
  st->cr();
}
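read_extra_data above walks a text dump whose entries carry a prefix deciding whether the payload becomes a permanent symbol or an interned string that must also be marked for dumping. A hypothetical miniature of that prefix-dispatch loop, not the real SharedArchiveConfigFile format:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::istringstream in("Symbol: java/lang/Object\nString: hello\n");
  std::vector<std::string> extra_symbols, extra_strings;
  std::string line;
  while (std::getline(in, line)) {
    if (line.rfind("Symbol: ", 0) == 0) {
      extra_symbols.push_back(line.substr(8));   // becomes a permanent symbol
    } else if (line.rfind("String: ", 0) == 0) {
      extra_strings.push_back(line.substr(8));   // kept alive and marked for the dump
    }
  }
  std::cout << extra_symbols.size() << " symbols, "
            << extra_strings.size() << " strings\n";
  return 0;
}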
@@ -36,8 +36,9 @@
#define MAX_SHARED_DELTA (0x7FFFFFFF)

class outputStream;
class FileMapInfo;
class CHeapBitMap;
class FileMapInfo;
class DumpRegion;
struct ArchiveHeapOopmapInfo;

enum MapArchiveResult {
@@ -55,113 +56,6 @@ public:
  CompactHashtableStats string;
};

#if INCLUDE_CDS
class DumpRegion {
private:
  const char* _name;
  char* _base;
  char* _top;
  char* _end;
  bool _is_packed;
  ReservedSpace* _rs;
  VirtualSpace* _vs;

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  char* expand_top_to(char* newtop);
  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord);

  void append_intptr_t(intptr_t n, bool need_to_mark = false);

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const;
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

  void init(ReservedSpace* rs, VirtualSpace* vs);

  void pack(DumpRegion* next = NULL);

  bool contains(char* p) {
    return base() <= p && p < top();
  }
};

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p, true);
  }

  void do_u4(u4* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_bool(bool *p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o);

  void do_region(u_char* start, size_t size);

  bool reading() const { return false; }
};

// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;

  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p);

  void do_u4(u4* p);

  void do_bool(bool *p);

  void do_tag(int tag);

  void do_oop(oop *p);

  void do_region(u_char* start, size_t size);

  bool reading() const { return true; }
};

#endif // INCLUDE_CDS

// Class Data Sharing Support
class MetaspaceShared : AllStatic {

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"

@@ -40,9 +40,25 @@ void ResourceArea::bias_to(MEMFLAGS new_flags) {
  }
}

//------------------------------ResourceMark-----------------------------------
debug_only(int ResourceArea::_warned;) // to suppress multiple warnings
#ifdef ASSERT

void ResourceArea::verify_has_resource_mark() {
  if (_nesting <= 0) {
    // Only report the first occurrence of an allocating thread that
    // is missing a ResourceMark, to avoid possible recursive errors
    // in error handling.
    static volatile bool reported = false;
    if (!Atomic::load(&reported)) {
      if (!Atomic::cmpxchg(&reported, false, true)) {
        fatal("memory leak: allocating without ResourceMark");
      }
    }
  }
}

#endif // ASSERT

//------------------------------ResourceMark-----------------------------------
// The following routines are declared in allocation.hpp and used everywhere:

// Allocation in thread-local resource area
@@ -60,30 +76,3 @@ extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_s
extern void resource_free_bytes( char *old, size_t size ) {
  Thread::current()->resource_area()->Afree(old, size);
}

#ifdef ASSERT
ResourceMark::ResourceMark(Thread *thread) {
  assert(thread == Thread::current(), "not the current thread");
  initialize(thread);
}

DeoptResourceMark::DeoptResourceMark(Thread *thread) {
  assert(thread == Thread::current(), "not the current thread");
  initialize(thread);
}
#endif


//-------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

void ResourceMark::free_malloced_objects() {
  Arena::free_malloced_objects(_chunk, _hwm, _max, _area->_hwm);
}

void DeoptResourceMark::free_malloced_objects() {
  Arena::free_malloced_objects(_chunk, _hwm, _max, _area->_hwm);
}

#endif
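verify_has_resource_mark above reports the missing-ResourceMark condition at most once: a cheap load first, then a cmpxchg so exactly one thread wins the right to report. The same pattern with std::atomic, as a standalone sketch rather than the HotSpot code:

#include <atomic>
#include <cstdio>

static std::atomic<bool> reported{false};

// Report a condition at most once, even with many threads racing; mirrors the
// Atomic::load / Atomic::cmpxchg pair in verify_has_resource_mark().
static void report_once(const char* msg) {
  if (!reported.load(std::memory_order_relaxed)) {
    bool expected = false;
    if (reported.compare_exchange_strong(expected, true)) {
      std::fprintf(stderr, "%s\n", msg); // the real code calls fatal() here
    }
  }
}

int main() {
  report_once("memory leak: allocating without ResourceMark");
  report_once("memory leak: allocating without ResourceMark"); // suppressed
  return 0;
}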
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,20 +42,19 @@
//------------------------------ResourceArea-----------------------------------
// A ResourceArea is an Arena that supports safe usage of ResourceMark.
class ResourceArea: public Arena {
  friend class ResourceMark;
  friend class DeoptResourceMark;
  friend class VMStructs;
  debug_only(int _nesting;) // current # of nested ResourceMarks
  debug_only(static int _warned;) // to suppress multiple warnings

#ifdef ASSERT
  int _nesting; // current # of nested ResourceMarks
  void verify_has_resource_mark();
#endif // ASSERT

public:
  ResourceArea(MEMFLAGS flags = mtThread) : Arena(flags) {
    debug_only(_nesting = 0;)
  }
  ResourceArea(MEMFLAGS flags = mtThread) :
    Arena(flags) DEBUG_ONLY(COMMA _nesting(0)) {}

  ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) : Arena(flags, init_size) {
    debug_only(_nesting = 0;);
  }
  ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) :
    Arena(flags, init_size) DEBUG_ONLY(COMMA _nesting(0)) {}

  char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

@@ -63,104 +62,153 @@ public:
  // (by default, ResourceArea is tagged as mtThread, per-thread general purpose storage)
  void bias_to(MEMFLAGS flags);

  debug_only(int nesting() const { return _nesting; })
  DEBUG_ONLY(int nesting() const { return _nesting; })

  // Capture the state of a ResourceArea needed by a ResourceMark for
  // rollback to that mark.
  class SavedState {
    friend class ResourceArea;
    Chunk* _chunk;
    char* _hwm;
    char* _max;
    size_t _size_in_bytes;
    DEBUG_ONLY(int _nesting;)

  public:
    SavedState(ResourceArea* area) :
      _chunk(area->_chunk),
      _hwm(area->_hwm),
      _max(area->_max),
      _size_in_bytes(area->_size_in_bytes)
      DEBUG_ONLY(COMMA _nesting(area->_nesting))
    {}
  };

  // Check and adjust debug-only nesting level.
  void activate_state(const SavedState& state) {
    assert(_nesting == state._nesting, "precondition");
    assert(_nesting >= 0, "precondition");
    assert(_nesting < INT_MAX, "nesting overflow");
    DEBUG_ONLY(++_nesting;)
  }

  // Check and adjust debug-only nesting level.
  void deactivate_state(const SavedState& state) {
    assert(_nesting > state._nesting, "deactivating inactive mark");
    assert((_nesting - state._nesting) == 1, "deactivating across another mark");
    DEBUG_ONLY(--_nesting;)
  }

  // Roll back the allocation state to the indicated state values.
  // The state must be the current state for this thread.
  void rollback_to(const SavedState& state) {
    assert(_nesting > state._nesting, "rollback to inactive mark");
    assert((_nesting - state._nesting) == 1, "rollback across another mark");

    if (UseMallocOnly) {
      free_malloced_objects(state._chunk, state._hwm, state._max, _hwm);
    }

    if (state._chunk->next() != nullptr) { // Delete later chunks.
      // Reset size before deleting chunks. Otherwise, the total
      // size could exceed the total chunk size.
      assert(size_in_bytes() > state._size_in_bytes,
             "size: " SIZE_FORMAT ", saved size: " SIZE_FORMAT,
             size_in_bytes(), state._size_in_bytes);
      set_size_in_bytes(state._size_in_bytes);
      state._chunk->next_chop();
    } else {
      assert(size_in_bytes() == state._size_in_bytes, "Sanity check");
    }
    _chunk = state._chunk; // Roll back to saved chunk.
    _hwm = state._hwm;
    _max = state._max;

    // Clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) {
      memset(state._hwm, badResourceValue, state._max - state._hwm);
    }
  }
};


//------------------------------ResourceMark-----------------------------------
// A resource mark releases all resources allocated after it was constructed
// when the destructor is called. Typically used as a local variable.

// Shared part of implementation for ResourceMark and DeoptResourceMark.
class ResourceMarkImpl {
  ResourceArea* _area; // Resource area to stack allocate
  ResourceArea::SavedState _saved_state;

  NONCOPYABLE(ResourceMarkImpl);

public:
  explicit ResourceMarkImpl(ResourceArea* area) :
    _area(area),
    _saved_state(area)
  {
    _area->activate_state(_saved_state);
  }

  explicit ResourceMarkImpl(Thread* thread)
    : ResourceMarkImpl(thread->resource_area()) {}

  ~ResourceMarkImpl() {
    reset_to_mark();
    _area->deactivate_state(_saved_state);
  }

  void reset_to_mark() const {
    _area->rollback_to(_saved_state);
  }
};

class ResourceMark: public StackObj {
protected:
  ResourceArea *_area; // Resource area to stack allocate
  Chunk *_chunk; // saved arena chunk
  char *_hwm, *_max;
  size_t _size_in_bytes;
  const ResourceMarkImpl _impl;
#ifdef ASSERT
  Thread* _thread;
  ResourceMark* _previous_resource_mark;
#endif //ASSERT

  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max= _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    _thread = thread;
    _previous_resource_mark = thread->current_resource_mark();
    thread->set_current_resource_mark(this);
#endif // ASSERT
  }
public:

  NONCOPYABLE(ResourceMark);

  // Helper providing common constructor implementation.
#ifndef ASSERT
  ResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
  ResourceMark(ResourceArea* area, Thread* thread) : _impl(area) {}
#else
  ResourceMark(Thread *thread);
  ResourceMark(ResourceArea* area, Thread* thread) :
    _impl(area),
    _thread(thread),
    _previous_resource_mark(nullptr)
  {
    if (_thread != nullptr) {
      assert(_thread == Thread::current(), "not the current thread");
      _previous_resource_mark = _thread->current_resource_mark();
      _thread->set_current_resource_mark(this);
    }
  }
#endif // ASSERT

  ResourceMark() { initialize(Thread::current()); }
public:

  ResourceMark() : ResourceMark(Thread::current()) {}

  explicit ResourceMark(Thread* thread)
    : ResourceMark(thread->resource_area(), thread) {}

  explicit ResourceMark(ResourceArea* area)
    : ResourceMark(area, DEBUG_ONLY(Thread::current_or_null()) NOT_DEBUG(nullptr)) {}

  ResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = r->_size_in_bytes;
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    Thread* thread = Thread::current_or_null();
    if (thread != NULL) {
      _thread = thread;
      _previous_resource_mark = thread->current_resource_mark();
      thread->set_current_resource_mark(this);
    } else {
      _thread = NULL;
      _previous_resource_mark = NULL;
    }
#endif // ASSERT
  }

  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) { // Delete later chunks
      // reset arena size before delete chunks. Otherwise, the total
      // arena size could exceed total chunk size
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk; // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~ResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
#ifdef ASSERT
    if (_thread != NULL) {
    if (_thread != nullptr) {
      _thread->set_current_resource_mark(_previous_resource_mark);
    }
#endif // ASSERT
  }
#endif // ASSERT


private:
  void free_malloced_objects() PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; }
  void reset_to_mark() { _impl.reset_to_mark(); }
};

//------------------------------DeoptResourceMark-----------------------------------
@@ -190,75 +238,18 @@ protected:
// special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
// then existing ResourceMarks would work fine since no one use new to allocate them
// and they would be stack allocated. This leaves open the possibility of accidental
// misuse so we simple duplicate the ResourceMark functionality here.
// misuse so we duplicate the ResourceMark functionality via a shared implementation
// class.

class DeoptResourceMark: public CHeapObj<mtInternal> {
protected:
  ResourceArea *_area; // Resource area to stack allocate
  Chunk *_chunk; // saved arena chunk
  char *_hwm, *_max;
  size_t _size_in_bytes;
  const ResourceMarkImpl _impl;

  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max= _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }
  NONCOPYABLE(DeoptResourceMark);

public:
public:
  explicit DeoptResourceMark(Thread* thread) : _impl(thread) {}

#ifndef ASSERT
  DeoptResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
#else
  DeoptResourceMark(Thread *thread);
#endif // ASSERT

  DeoptResourceMark() { initialize(Thread::current()); }

  DeoptResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }

  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) { // Delete later chunks
      // reset arena size before delete chunks. Otherwise, the total
      // arena size could exceed total chunk size
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk; // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~DeoptResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
  }


private:
  void free_malloced_objects() PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; };
  void reset_to_mark() { _impl.reset_to_mark(); }
};

#endif // SHARE_MEMORY_RESOURCEAREA_HPP
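The refactor above concentrates mark/rollback logic in ResourceArea::SavedState plus activate_state, deactivate_state and rollback_to, with ResourceMarkImpl driving them from its constructor and destructor. A compact standalone model of that RAII protocol, not part of the commit, using a plain byte arena instead of chunked Arena storage:

#include <cassert>
#include <cstddef>

class MiniArena {                // stand-in for ResourceArea
  char _buf[1024];
  size_t _top = 0;
  int _nesting = 0;              // debug-only in HotSpot
public:
  struct SavedState {            // snapshot needed to roll back to a mark
    size_t top;
    int nesting;
    explicit SavedState(const MiniArena& a) : top(a._top), nesting(a._nesting) {}
  };

  void* allocate(size_t n) {
    assert(_nesting > 0 && "allocating without a mark");
    void* p = _buf + _top;
    _top += n;
    return p;
  }
  void activate_state(const SavedState& s) {
    assert(_nesting == s.nesting && "precondition");
    ++_nesting;
  }
  void deactivate_state(const SavedState& s) {
    assert(_nesting - s.nesting == 1 && "deactivating across another mark");
    --_nesting;
  }
  void rollback_to(const SavedState& s) { _top = s.top; }
  size_t used() const { return _top; }
};

class MiniMark {                 // stand-in for ResourceMarkImpl
  MiniArena& _area;
  MiniArena::SavedState _saved;
public:
  explicit MiniMark(MiniArena& a) : _area(a), _saved(a) { _area.activate_state(_saved); }
  ~MiniMark() { _area.rollback_to(_saved); _area.deactivate_state(_saved); }
};

int main() {
  MiniArena arena;
  {
    MiniMark outer(arena);
    arena.allocate(16);
    {
      MiniMark inner(arena);     // marks must nest, like ResourceMark
      arena.allocate(64);
    }                            // inner rollback frees the 64 bytes
    assert(arena.used() == 16);
  }
  assert(arena.used() == 0);
  return 0;
}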
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,17 +26,17 @@
|
||||
#define SHARE_MEMORY_RESOURCEAREA_INLINE_HPP
|
||||
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
|
||||
inline char* ResourceArea::allocate_bytes(size_t size, AllocFailType alloc_failmode) {
|
||||
#ifdef ASSERT
|
||||
if (_nesting < 1 && !_warned++)
|
||||
fatal("memory leak: allocating without ResourceMark");
|
||||
verify_has_resource_mark();
|
||||
if (UseMallocOnly) {
|
||||
// use malloc, but save pointer in res. area for later freeing
|
||||
char** save = (char**)internal_malloc_4(sizeof(char*));
|
||||
return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
|
||||
}
|
||||
#endif
|
||||
#endif // ASSERT
|
||||
return (char*)Amalloc(size, alloc_failmode);
|
||||
}
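The inline nesting check (`_nesting < 1 && !_warned++`) is folded into a named verify_has_resource_mark() helper, keeping the allocation fast path free of debug logic. A hedged sketch of the same move in isolation (all names here are hypothetical stand-ins, and fatal() is modeled by abort()):

// Sketch of hoisting a debug-only check into a named helper.
#include <cassert>
#include <cstdio>
#include <cstdlib>

#ifdef ASSERT
static int  g_nesting = 0;   // stands in for ResourceArea::_nesting
static bool g_warned  = false;

static void verify_has_resource_mark() {
  if (g_nesting < 1 && !g_warned) {
    g_warned = true;
    std::fprintf(stderr, "memory leak: allocating without ResourceMark\n");
    std::abort();  // the real code calls fatal()
  }
}
#endif

static char* allocate_bytes(size_t size) {
#ifdef ASSERT
  verify_has_resource_mark();  // one call site; the logic lives in one place
#endif
  return static_cast<char*>(std::malloc(size));  // stand-in for Amalloc
}

int main() {
  char* p = allocate_bytes(16);
  std::free(p);
  return 0;
}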
@ -1,5 +1,5 @@

/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -30,6 +30,7 @@
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"

// For UseCompressedOops.

@ -29,6 +29,7 @@
#include "memory/resourceArea.hpp"
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
#include "runtime/arguments.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"

@ -423,6 +424,15 @@ void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
  it->push_method_entry(&this_ptr, (intptr_t*)&_adapter_trampoline);
}

void ConstMethod::set_adapter_trampoline(AdapterHandlerEntry** trampoline) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(*trampoline == NULL,
           "must be NULL during dump time, to be initialized at run time");
  }
  _adapter_trampoline = trampoline;
}
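set_adapter_trampoline moves from an inline definition in constMethod.hpp to constMethod.cpp, which is what allows the runtime/arguments.hpp include to be dropped from the header (visible in the next hunk). A generic single-file sketch of the technique, with hypothetical names and the file split marked in comments:

// Sketch: moving a member function out of the header confines its heavy
// dependency to one translation unit. Names are hypothetical, not JDK code.

// --- widget.hpp ------------------------------------------------------
class Widget {
public:
  void set_mode(int mode);  // declaration only; no heavy includes needed
private:
  int _mode = 0;
};

// --- widget.cpp ------------------------------------------------------
// #include "widget.hpp"
// #include "heavy_config.hpp"  // only this .cpp pays for the include
#include <cassert>

void Widget::set_mode(int mode) {
  assert(mode >= 0);  // stands in for a dump-time precondition check
  _mode = mode;
}

int main() {
  Widget w;
  w.set_mode(1);
  return 0;
}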
// Printing

void ConstMethod::print_on(outputStream* st) const {

@ -26,7 +26,6 @@
#define SHARE_OOPS_CONSTMETHOD_HPP

#include "oops/oop.hpp"
#include "runtime/arguments.hpp"
#include "utilities/align.hpp"

// A ConstMethod represents portions of a Java method which are not written to after
@ -292,14 +291,7 @@ public:
         "shared methods in archive have fixed adapter_trampoline");
  _adapter = adapter;
}
void set_adapter_trampoline(AdapterHandlerEntry** trampoline) {
  Arguments::assert_is_dumping_archive();
  if (DumpSharedSpaces) {
    assert(*trampoline == NULL,
           "must be NULL during dump time, to be initialized at run time");
  }
  _adapter_trampoline = trampoline;
}
void set_adapter_trampoline(AdapterHandlerEntry** trampoline);
void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
  assert(is_shared(), "must be");
  *_adapter_trampoline = adapter;
@ -293,17 +293,18 @@ void ConstantPool::archive_resolved_references(Thread* THREAD) {
  int ref_map_len = ref_map == NULL ? 0 : ref_map->length();
  int rr_len = rr->length();
  for (int i = 0; i < rr_len; i++) {
    oop p = rr->obj_at(i);
    oop obj = rr->obj_at(i);
    rr->obj_at_put(i, NULL);
    if (p != NULL && i < ref_map_len) {
    if (obj != NULL && i < ref_map_len) {
      int index = object_to_cp_index(i);
      if (tag_at(index).is_string()) {
        oop op = StringTable::create_archived_string(p, THREAD);
        // If the String object is not archived (possibly too large),
        // NULL is returned. Also set it in the array, so we won't
        // have a 'bad' reference in the archived resolved_reference
        // array.
        rr->obj_at_put(i, op);
        oop archived_string = HeapShared::find_archived_heap_object(obj);
        // Update the reference to point to the archived copy
        // of this string.
        // If the string is too large to archive, NULL is
        // stored into rr. At run time, string_at_impl() will create and intern
        // the string.
        rr->obj_at_put(i, archived_string);
      }
    }
  }
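The loop no longer archives strings on demand (StringTable::create_archived_string); it only looks up copies that were archived in an earlier pass, via HeapShared::find_archived_heap_object. A standalone sketch of that lookup pass, with std::unordered_map standing in for the archived-object table and toy types throughout (nothing here is HotSpot API):

#include <string>
#include <unordered_map>
#include <vector>

using Oop = const std::string*;  // toy stand-in for a heap reference

// Stands in for HeapShared::find_archived_heap_object(): returns the
// archived copy if one was created in an earlier pass, else nullptr.
static Oop find_archived(const std::unordered_map<Oop, Oop>& table, Oop obj) {
  auto it = table.find(obj);
  return it == table.end() ? nullptr : it->second;
}

static void archive_resolved_references(std::vector<Oop>& rr,
                                        const std::unordered_map<Oop, Oop>& table) {
  for (size_t i = 0; i < rr.size(); i++) {
    Oop obj = rr[i];
    rr[i] = nullptr;                      // default: no archived reference
    if (obj != nullptr) {
      rr[i] = find_archived(table, obj);  // nullptr if not archived;
    }                                     // recreated lazily at run time
  }
}

int main() {
  std::string a = "alpha", b = "beta";
  std::string a_arch = "alpha";  // pretend archived copy of 'a'
  std::vector<Oop> rr = { &a, &b, nullptr };
  std::unordered_map<Oop, Oop> table = { { &a, &a_arch } };
  archive_resolved_references(rr, table);  // rr[0] -> &a_arch, rr[1] -> nullptr
  return 0;
}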
@ -335,6 +336,19 @@ void ConstantPool::resolve_class_constants(TRAPS) {
    }
  }
}

void ConstantPool::add_dumped_interned_strings() {
  objArrayOop rr = resolved_references();
  if (rr != NULL) {
    int rr_len = rr->length();
    for (int i = 0; i < rr_len; i++) {
      oop p = rr->obj_at(i);
      if (java_lang_String::is_instance(p)) {
        HeapShared::add_to_dumped_interned_strings(p);
      }
    }
  }
}
#endif
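add_dumped_interned_strings is the earlier pass that feeds the table consulted above: it walks resolved_references and registers every String for dumping. A sketch of such a collection pass, using the same toy types as the previous example (not HotSpot API):

#include <string>
#include <unordered_set>
#include <vector>

using Oop = const std::string*;  // toy stand-in for a heap reference

// Stands in for HeapShared::add_to_dumped_interned_strings().
static void add_dumped_interned_strings(const std::vector<Oop>& rr,
                                        std::unordered_set<Oop>& dumped) {
  for (Oop p : rr) {
    if (p != nullptr) {  // the real code checks java_lang_String::is_instance(p)
      dumped.insert(p);
    }
  }
}

int main() {
  std::string s = "interned";
  std::vector<Oop> rr = { &s, nullptr };
  std::unordered_set<Oop> dumped;
  add_dumped_interned_strings(rr, dumped);  // dumped now contains &s
  return 0;
}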
// CDS support. Create a new resolved_references array.

@ -740,6 +740,7 @@ class ConstantPool : public Metadata {

  // CDS support
  void archive_resolved_references(Thread *THREAD) NOT_CDS_JAVA_HEAP_RETURN;
  void add_dumped_interned_strings() NOT_CDS_JAVA_HEAP_RETURN;
  void resolve_class_constants(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  void remove_unshareable_info();
  void restore_unshareable_info(TRAPS);
@ -43,6 +43,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/macros.hpp"

@ -69,6 +69,7 @@
#include "prims/jvmtiRedefineClasses.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fieldDescriptor.inline.hpp"

@ -3599,8 +3600,10 @@ const char* InstanceKlass::internal_name() const {
}

void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data,
                                             const char* module_name,
                                             const ModuleEntry* module_entry,
                                             const ClassFileStream* cfs) const {
  log_to_classlist(cfs);

  if (!log_is_enabled(Info, class, load)) {
    return;
  }

@ -3615,6 +3618,7 @@ void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data,
  // Source
  if (cfs != NULL) {
    if (cfs->source() != NULL) {
      const char* module_name = (module_entry->name() == NULL) ? UNNAMED_MODULE : module_entry->name()->as_C_string();
      if (module_name != NULL) {
        // When the boot loader created the stream, it didn't know the module name
        // yet. Let's format it now.

@ -4190,3 +4194,52 @@ unsigned char * InstanceKlass::get_cached_class_file_bytes() {
  return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
}
#endif

void InstanceKlass::log_to_classlist(const ClassFileStream* stream) const {
#if INCLUDE_CDS
  if (DumpLoadedClassList && classlist_file->is_open()) {
    if (!ClassLoader::has_jrt_entry()) {
      warning("DumpLoadedClassList and CDS are not supported in exploded build");
      DumpLoadedClassList = NULL;
      return;
    }
    ClassLoaderData* loader_data = class_loader_data();
    if (!SystemDictionaryShared::is_sharing_possible(loader_data)) {
      return;
    }
    bool skip = false;
    if (is_shared()) {
      assert(stream == NULL, "shared class with stream");
    } else {
      assert(stream != NULL, "non-shared class without stream");
      // skip hidden class and unsafe anonymous class.
      if (is_hidden() || unsafe_anonymous_host() != NULL) {
        return;
      }
      oop class_loader = loader_data->class_loader();
      if (class_loader == NULL || SystemDictionary::is_platform_class_loader(class_loader)) {
        // For the boot and platform class loaders, skip classes that are not found in the
        // java runtime image, such as those found in the --patch-module entries.
        // These classes can't be loaded from the archive during runtime.
        if (!stream->from_boot_loader_modules_image() && strncmp(stream->source(), "jrt:", 4) != 0) {
          skip = true;
        }

        if (class_loader == NULL && ClassLoader::contains_append_entry(stream->source())) {
          // .. but don't skip the boot classes that are loaded from -Xbootclasspath/a
          // as they can be loaded from the archive during runtime.
          skip = false;
        }
      }
    }
    ResourceMark rm;
    if (skip) {
      tty->print_cr("skip writing class %s from source %s to classlist file",
                    name()->as_C_string(), stream->source());
    } else {
      classlist_file->print_cr("%s", name()->as_C_string());
      classlist_file->flush();
    }
  }
#endif // INCLUDE_CDS
}
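The new log_to_classlist centralizes the decision of which loaded classes are worth recording for a future CDS dump. A standalone sketch of just the skip logic (toy parameters; the real checks go through ClassFileStream and the loader hierarchy): boot and platform classes must come from the runtime image, except boot classes appended via -Xbootclasspath/a, which remain archivable.

#include <cassert>
#include <cstring>

// Toy model of the classlist filter.
static bool should_skip(const char* source,
                        bool is_boot_or_platform_loader,
                        bool is_boot_append_entry) {
  if (!is_boot_or_platform_loader) {
    return false;  // application classes: always log
  }
  bool from_jrt = std::strncmp(source, "jrt:", 4) == 0;
  if (!from_jrt && !is_boot_append_entry) {
    return true;   // e.g. --patch-module classes: not loadable from the archive
  }
  return false;
}

int main() {
  assert(!should_skip("jrt:/java.base/java/lang/String.class", true, false));
  assert(should_skip("/patches/Foo.class", true, false));  // --patch-module style
  assert(!should_skip("/extra/Bar.class", true, true));    // -Xbootclasspath/a
  return 0;
}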
@ -1322,6 +1322,8 @@ private:
  void link_previous_versions(InstanceKlass* pv) { _previous_versions = pv; }
  void mark_newly_obsolete_methods(Array<Method*>* old_methods, int emcp_method_count);
#endif
  // log class name to classlist
  void log_to_classlist(const ClassFileStream* cfs) const;
public:
  // CDS support - remove and restore oops from metadata. Oops are not shared.
  virtual void remove_unshareable_info();

@ -1363,7 +1365,7 @@ public:

  // Logging
  void print_class_load_logging(ClassLoaderData* loader_data,
                                const char* module_name,
                                const ModuleEntry* module_entry,
                                const ClassFileStream* cfs) const;
};

@ -45,6 +45,7 @@
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/macros.hpp"

@ -29,7 +29,6 @@
#include "compiler/compilerDefinitions.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/arguments.hpp"
#include "utilities/align.hpp"

class MethodCounters : public Metadata {