8003424: Enable Class Data Sharing for CompressedOops
8016729: ObjectAlignmentInBytes=16 now forces the use of heap based compressed oops
8005933: The -Xshare:auto option is ignored for -server
Move klass metaspace above the heap and support CDS with compressed klass ptrs.
Reviewed-by: coleenp, kvn, mgerdin, tschatzl, stefank
parent 9d372e7801
commit 4d91f4e69d
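The core of this change is where compressed klass pointers decode from: they no longer share the Java heap base, but get their own narrow_klass_base (the lower of the class metaspace and the CDS archive, if mapped) and a shift that is zero whenever the whole range fits in an unsigned 32-bit offset. A minimal sketch of the arithmetic the SPARC and x86 changes below emit — illustration only, with assumed example base and shift values, not code from the patch:

// Minimal sketch (illustration only) of narrow-klass encode/decode.
// kNarrowKlassBase and kNarrowKlassShift are assumed example values.
#include <cassert>
#include <cstdint>

const uint64_t kNarrowKlassBase = 0x800000000ULL; // assumed example base
const int kNarrowKlassShift = 3; // LogKlassAlignmentInBytes: Klass* is 8-byte aligned

uint32_t encode_klass(uint64_t klass_ptr) {
  // subtract the base, then shift right (shift is 0 when the whole
  // range is already reachable as an unsigned 32-bit offset)
  return (uint32_t)((klass_ptr - kNarrowKlassBase) >> kNarrowKlassShift);
}

uint64_t decode_klass(uint32_t narrow_klass) {
  // shift left, then add the base back
  return ((uint64_t)narrow_klass << kNarrowKlassShift) + kNarrowKlassBase;
}

int main() {
  uint64_t k = kNarrowKlassBase + 0x1230;
  assert(decode_klass(encode_klass(k)) == k); // round-trips for aligned pointers
  return 0;
}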
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -1145,7 +1146,7 @@ void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  narrowOop encoded_k = oopDesc::encode_klass(k);
+  narrowOop encoded_k = Klass::encode_klass(k);

   assert_not_delayed();
   // Relocation with special format (see relocInfo_sparc.hpp).
@@ -1419,7 +1420,6 @@ void MacroAssembler::verify_oop_subroutine() {
   load_klass(O0_obj, O0_obj);
   // assert((klass != NULL)
   br_null_short(O0_obj, pn, fail);
-  // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

   wrccr( O5_save_flags ); // Restore CCR's
@@ -4089,52 +4089,91 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
 }

 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  if (Universe::narrow_klass_base() != NULL)
-    sub(r, G6_heapbase, r);
-  srlx(r, LogKlassAlignmentInBytes, r);
+  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+  assert(r != G6_heapbase, "bad register choice");
+  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+  sub(r, G6_heapbase, r);
+  if (Universe::narrow_klass_shift() != 0) {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+    srlx(r, LogKlassAlignmentInBytes, r);
+  }
+  reinit_heapbase();
 }

 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  if (Universe::narrow_klass_base() == NULL) {
-    srlx(src, LogKlassAlignmentInBytes, dst);
+  if (src == dst) {
+    encode_klass_not_null(src);
   } else {
-    sub(src, G6_heapbase, dst);
-    srlx(dst, LogKlassAlignmentInBytes, dst);
+    assert (UseCompressedKlassPointers, "must be compressed");
+    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+    set((intptr_t)Universe::narrow_klass_base(), dst);
+    sub(src, dst, dst);
+    if (Universe::narrow_klass_shift() != 0) {
+      srlx(dst, LogKlassAlignmentInBytes, dst);
+    }
   }
 }

+// Function instr_size_for_decode_klass_not_null() counts the instructions
+// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
+// the instructions they generate change, then this method needs to be updated.
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  // set + add + set
+  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
+                   insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
+  if (Universe::narrow_klass_shift() == 0) {
+    return num_instrs * BytesPerInstWord;
+  } else { // sllx
+    return (num_instrs + 1) * BytesPerInstWord;
+  }
+}
+
+// !!! If the instructions that get generated here change then function
+// instr_size_for_decode_klass_not_null() needs to get updated.
 void MacroAssembler::decode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
   assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  sllx(r, LogKlassAlignmentInBytes, r);
-  if (Universe::narrow_klass_base() != NULL)
-    add(r, G6_heapbase, r);
+  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+  assert(r != G6_heapbase, "bad register choice");
+  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+  if (Universe::narrow_klass_shift() != 0)
+    sllx(r, LogKlassAlignmentInBytes, r);
+  add(r, G6_heapbase, r);
+  reinit_heapbase();
 }

 void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
-  // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-  sllx(src, LogKlassAlignmentInBytes, dst);
-  if (Universe::narrow_klass_base() != NULL)
-    add(dst, G6_heapbase, dst);
+  if (src == dst) {
+    decode_klass_not_null(src);
+  } else {
+    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+    // pd_code_size_limit.
+    assert (UseCompressedKlassPointers, "must be compressed");
+    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
+    if (Universe::narrow_klass_shift() != 0) {
+      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
+      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+      sllx(src, LogKlassAlignmentInBytes, dst);
+      add(dst, G6_heapbase, dst);
+      reinit_heapbase();
+    } else {
+      set((intptr_t)Universe::narrow_klass_base(), dst);
+      add(src, dst, dst);
+    }
+  }
 }

 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedKlassPointers) {
-    AddressLiteral base(Universe::narrow_ptrs_base_addr());
-    load_ptr_contents(base, G6_heapbase);
+    if (Universe::heap() != NULL) {
+      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
+    } else {
+      AddressLiteral base(Universe::narrow_ptrs_base_addr());
+      load_ptr_contents(base, G6_heapbase);
+    }
   }
 }
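The new SPARC instr_size_for_decode_klass_not_null() must mirror exactly what decode_klass_not_null() and reinit_heapbase() emit. A standalone model of that sizing rule — illustration only; the instruction counts for the two set() expansions are assumed example inputs, not values from the patch:

// Standalone model (illustration only) of the SPARC size rule above:
// bytes = (insts for "set klass_base" + 1 add + insts for "set ptrs_base"
//          + 1 optional sllx) * 4-byte instruction words.
#include <cassert>

const int kBytesPerInstWord = 4;

int decode_klass_size(int insts_for_set_klass_base,
                      int insts_for_set_ptrs_base,
                      int narrow_klass_shift) {
  int num_instrs = insts_for_set_klass_base + 1 + insts_for_set_ptrs_base; // set + add + set
  if (narrow_klass_shift != 0) {
    num_instrs += 1; // one extra sllx when the narrow klass is scaled
  }
  return num_instrs * kBytesPerInstWord;
}

int main() {
  // e.g. if each "set" expands to 2 instructions and the shift is 0: 5 * 4 = 20 bytes
  assert(decode_klass_size(2, 2, 0) == 20);
  return 0;
}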
@@ -1177,6 +1177,9 @@ public:
   void push_CPU_state();
   void pop_CPU_state();

+  // Returns the byte size of the instructions generated by decode_klass_not_null().
+  static int instr_size_for_decode_klass_not_null();
+
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
   guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
   if (format() != 0) {
     assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-    jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
+    jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
     inst &= ~Assembler::hi22(-1);
     inst |= Assembler::hi22((intptr_t)np);
     if (verify_only) {
@@ -559,10 +559,7 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
   int klass_load_size;
   if (UseCompressedKlassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
-    if (Universe::narrow_klass_base() == NULL)
-      klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
-    else
-      klass_load_size = 3*BytesPerInstWord;
+    klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
   } else {
     klass_load_size = 1*BytesPerInstWord;
   }
@@ -1663,9 +1660,12 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   if (UseCompressedKlassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
-    st->print_cr("\tSLL R_G5,3,R_G5");
-    if (Universe::narrow_klass_base() != NULL)
-      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
+    st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
+    if (Universe::narrow_klass_shift() != 0) {
+      st->print_cr("\tSLL R_G5,3,R_G5");
+    }
+    st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
+    st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
   } else {
     st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
   }
@@ -2563,10 +2563,7 @@ encode %{
   int klass_load_size;
   if (UseCompressedKlassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
-    if (Universe::narrow_klass_base() == NULL)
-      klass_load_size = 2*BytesPerInstWord;
-    else
-      klass_load_size = 3*BytesPerInstWord;
+    klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
   } else {
     klass_load_size = 1*BytesPerInstWord;
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
     const int basic = 5*BytesPerInstWord +
                       // shift;add for load_klass (only shift with zero heap based)
                       (UseCompressedKlassPointers ?
-                       ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                        MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
     return basic + slop;
   } else {
     const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                       // shift;add for load_klass (only shift with zero heap based)
                       (UseCompressedKlassPointers ?
-                       ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                        MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
     return (basic + slop);
   }
 }
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4810,23 +4811,8 @@ void MacroAssembler::load_klass(Register dst, Register src) {
 }

 void MacroAssembler::load_prototype_header(Register dst, Register src) {
-#ifdef _LP64
-  if (UseCompressedKlassPointers) {
-    assert (Universe::heap() != NULL, "java heap should be initialized");
-    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    if (Universe::narrow_klass_shift() != 0) {
-      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
-    } else {
-      movq(dst, Address(dst, Klass::prototype_header_offset()));
-    }
-  } else
-#endif
-  {
-    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    movptr(dst, Address(dst, Klass::prototype_header_offset()));
-  }
+  load_klass(dst, src);
+  movptr(dst, Address(dst, Klass::prototype_header_offset()));
 }

 void MacroAssembler::store_klass(Register dst, Register src) {
@@ -4914,7 +4900,7 @@ void MacroAssembler::store_klass_gap(Register dst, Register src) {

 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
-  assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
+  assert (UseCompressedOops, "should be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   if (CheckCompressedOops) {
     Label ok;
@@ -5058,69 +5044,80 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 }

 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
-#endif
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(r, r12_heapbase);
-  }
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  assert(r != r12_heapbase, "Encoding a klass in r12");
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  subq(r, r12_heapbase);
   if (Universe::narrow_klass_shift() != 0) {
     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
+  reinit_heapbase();
 }

 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
-#endif
-  if (dst != src) {
-    movq(dst, src);
-  }
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(dst, r12_heapbase);
-  }
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    shrq(dst, LogKlassAlignmentInBytes);
+  if (dst == src) {
+    encode_klass_not_null(src);
+  } else {
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    negq(dst);
+    addq(dst, src);
+    if (Universe::narrow_klass_shift() != 0) {
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      shrq(dst, LogKlassAlignmentInBytes);
+    }
   }
 }

+// Function instr_size_for_decode_klass_not_null() counts the instructions
+// generated by decode_klass_not_null(register r) and reinit_heapbase(),
+// when (Universe::heap() != NULL).  Hence, if the instructions they
+// generate change, then this method needs to be updated.
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
+  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+}
+
+// !!! If the instructions that get generated here change then function
+// instr_size_for_decode_klass_not_null() needs to get updated.
 void MacroAssembler::decode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_klass_shift() != 0) {
     assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shlq(r, LogKlassAlignmentInBytes);
-    if (Universe::narrow_klass_base() != NULL) {
-      addq(r, r12_heapbase);
-    }
-  } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
   }
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  addq(r, r12_heapbase);
+  reinit_heapbase();
 }

 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedKlassPointers, "should only be used for compressed headers");
-  // Cannot assert, unverified entry point counts instructions (see .ad file)
-  // vtableStubs also counts instructions in pd_code_size_limit.
-  // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_klass_shift() != 0) {
-    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+  if (dst == src) {
+    decode_klass_not_null(dst);
   } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
-    if (dst != src) {
-      movq(dst, src);
+    // Cannot assert, unverified entry point counts instructions (see .ad file)
+    // vtableStubs also counts instructions in pd_code_size_limit.
+    // Also do not verify_oop as this is called by verify_oop.
+
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    if (Universe::narrow_klass_shift() != 0) {
+      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+      leaq(dst, Address(dst, src, Address::times_8, 0));
+    } else {
+      addq(dst, src);
     }
   }
 }
@@ -5148,7 +5145,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
@@ -5156,7 +5153,7 @@ void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
@@ -5182,7 +5179,7 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
@@ -5190,14 +5187,23 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedKlassPointers) {
-    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    if (Universe::heap() != NULL) {
+      if (Universe::narrow_oop_base() == NULL) {
+        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
+      } else {
+        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
+      }
+    } else {
+      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    }
   }
 }

 #endif // _LP64
@@ -371,6 +371,10 @@ class MacroAssembler: public Assembler {
   void cmp_narrow_klass(Register dst, Klass* k);
   void cmp_narrow_klass(Address dst, Klass* k);

+  // Returns the byte size of the instructions generated by decode_klass_not_null()
+  // when compressed klass pointers are being used.
+  static int instr_size_for_decode_klass_not_null();
+
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,9 +55,9 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     }
   } else {
     if (verify_only) {
-      assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match");
+      assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
     } else {
-      *(int32_t*) disp = oopDesc::encode_klass((Klass*)x);
+      *(int32_t*) disp = Klass::encode_klass((Klass*)x);
     }
   }
 } else {
@@ -675,7 +675,6 @@ class StubGenerator: public StubCodeGenerator {
     __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

     // return if everything seems ok
     __ bind(exit);
@@ -1021,7 +1021,6 @@ class StubGenerator: public StubCodeGenerator {
     __ load_klass(rax, rax); // get klass
     __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

     // return if everything seems ok
     __ bind(exit);
@@ -211,11 +211,11 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 32 : 0); // 2 leaqs
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
@@ -1393,9 +1393,7 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   if (UseCompressedKlassPointers) {
     st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (Universe::narrow_klass_shift() != 0) {
-      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
-    }
+    st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
   } else {
     st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
@@ -4035,146 +4033,6 @@ operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
   %}
 %}

-operand indirectNarrowKlass(rRegN reg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(DecodeNKlass reg);
-
-  format %{ "[$reg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indOffset8NarrowKlass(rRegN reg, immL8 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (8-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indOffset32NarrowKlass(rRegN reg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (32-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) lreg) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexNarrowKlass(rRegN reg, rRegL lreg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) lreg);
-
-  op_cost(10);
-  format %{"[$reg + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) (LShiftL lreg scale));
-
-  op_cost(10);
-  format %{"[$reg + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indCompressedKlassOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8));
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  op_cost(10);
-  format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %}
-  interface(MEMORY_INTER) %{
-    base(0xc); // R12
-    index($reg);
-    scale(0x3);
-    disp($off);
-  %}
-%}
-
-operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $idx << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($idx);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
 //----------Special Memory Operands--------------------------------------------
 // Stack Slot Operand - This operand is used for loading and storing temporary
 //                      values on the stack where a match requires a value to
@@ -4345,11 +4203,7 @@ opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
                indCompressedOopOffset,
                indirectNarrow, indOffset8Narrow, indOffset32Narrow,
                indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
-               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow,
-               indCompressedKlassOffset,
-               indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass,
-               indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass,
-               indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass);
+               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);

 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architectures pipeline.
@@ -6665,7 +6519,7 @@ instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
 instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
   match(Set dst (EncodePKlass src));
   effect(KILL cr);
-  format %{ "encode_heap_oop_not_null $dst,$src" %}
+  format %{ "encode_klass_not_null $dst,$src" %}
   ins_encode %{
     __ encode_klass_not_null($dst$$Register, $src$$Register);
   %}
@@ -6675,7 +6529,7 @@ instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
 instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
   match(Set dst (DecodeNKlass src));
   effect(KILL cr);
-  format %{ "decode_heap_oop_not_null $dst,$src" %}
+  format %{ "decode_klass_not_null $dst,$src" %}
   ins_encode %{
     Register s = $src$$Register;
     Register d = $dst$$Register;
@@ -362,15 +362,12 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
 ReservedSpace FileMapInfo::reserve_shared_memory() {
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
   char* requested_addr = si->_base;
-  size_t alignment = os::vm_allocation_granularity();

-  size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-                              SharedMiscDataSize + SharedMiscCodeSize,
-                              alignment);
+  size_t size = FileMapInfo::shared_spaces_size();

   // Reserve the space first, then map otherwise map will go right over some
   // other reserved memory (like the code cache).
-  ReservedSpace rs(size, alignment, false, requested_addr);
+  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
   if (!rs.is_reserved()) {
     fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
     return rs;
@@ -559,3 +556,19 @@ void FileMapInfo::print_shared_spaces() {
                 si->_base, si->_base + si->_used);
   }
 }
+
+// Unmap mapped regions of shared space.
+void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
+  FileMapInfo *map_info = FileMapInfo::current_info();
+  if (map_info) {
+    map_info->fail_continue(msg);
+    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+      if (map_info->_header._space[i]._base != NULL) {
+        map_info->unmap_region(i);
+        map_info->_header._space[i]._base = NULL;
+      }
+    }
+  } else if (DumpSharedSpaces) {
+    fail_stop(msg, NULL);
+  }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,15 @@ public:
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
+
+  static size_t shared_spaces_size() {
+    return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
+                         SharedMiscDataSize + SharedMiscCodeSize,
+                         os::vm_allocation_granularity());
+  }
+
+  // Stop CDS sharing and unmap CDS regions.
+  static void stop_sharing_and_unmap(const char* msg);
 };

 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
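The new shared_spaces_size() centralizes the rounding that reserve_shared_memory() and the metaspace initialization previously computed by hand. A standalone sketch of that rounding — illustration only; the region sizes and granularity below are assumed example values, not HotSpot defaults:

// Standalone sketch of align_size_up() rounding (illustration only).
#include <cstddef>
#include <cstdio>

static size_t align_size_up(size_t size, size_t alignment) {
  // round size up to the next multiple of alignment (a power of two)
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  size_t ro = 10u << 20, rw = 10u << 20;          // assumed read-only / read-write sizes
  size_t misc_data = 4u << 20, misc_code = 4u << 20;
  size_t granularity = 64u << 10;                 // assumed allocation granularity
  printf("%zu\n", align_size_up(ro + rw + misc_data + misc_code, granularity));
  return 0;
}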
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,9 +118,12 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
   _number_of_committed_segments = size_to_segments(_memory.committed_size());
   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
+  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
+  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
+  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

   // reserve space for _segmap
-  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
     return false;
   }
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
@@ -54,6 +55,8 @@ size_t const allocation_from_dictionary_limit = 64 * K;

 MetaWord* last_allocated = 0;

+size_t Metaspace::_class_metaspace_size;
+
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
   ZeroIndex = 0,
@@ -261,10 +264,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
   // count of chunks contained in this VirtualSpace
   uintx _container_count;

-  // Convenience functions for logical bottom and end
-  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
-  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
-
   // Convenience functions to access the _virtual_space
   char* low() const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
@@ -284,6 +283,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   ~VirtualSpaceNode();

+  // Convenience functions for logical bottom and end
+  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
+  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -1313,7 +1316,8 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

   // Class virtual space should always be expanded.  Call GC for the other
   // metadata virtual space.
-  if (vsl == Metaspace::class_space_list()) return true;
+  if (Metaspace::using_class_space() &&
+      (vsl == Metaspace::class_space_list())) return true;

   // If this is part of an allocation after a GC, expand
   // unconditionally.
@@ -2257,7 +2261,7 @@ void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
   size_t raw_word_size = get_raw_word_size(word_size);
   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
   assert(raw_word_size >= min_size,
-         err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
+         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
 }

@@ -2374,7 +2378,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
   if (result == NULL) {
     result = grow_and_allocate(word_size);
   }
-  if (result > 0) {
+  if (result != 0) {
     inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
@@ -2478,7 +2482,8 @@ size_t MetaspaceAux::_allocated_used_words[] = {0, 0};

 size_t MetaspaceAux::free_bytes() {
   size_t result = 0;
-  if (Metaspace::class_space_list() != NULL) {
+  if (Metaspace::using_class_space() &&
+      (Metaspace::class_space_list() != NULL)) {
     result = result + Metaspace::class_space_list()->free_bytes();
   }
   if (Metaspace::space_list() != NULL) {
@@ -2549,6 +2554,9 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
 }

 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
+    return 0;
+  }
   // Don't count the space in the freelists.  That space will be
   // added to the capacity calculation as needed.
   size_t capacity = 0;
@@ -2563,18 +2571,23 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
 }

 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t reserved = (mdtype == Metaspace::ClassType) ?
-                       Metaspace::class_space_list()->virtual_space_total() :
-                       Metaspace::space_list()->virtual_space_total();
-  return reserved * BytesPerWord;
+  if (mdtype == Metaspace::ClassType) {
+    return Metaspace::using_class_space() ?
+           Metaspace::class_space_list()->virtual_space_total() * BytesPerWord : 0;
+  } else {
+    return Metaspace::space_list()->virtual_space_total() * BytesPerWord;
+  }
 }

 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }

 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
+  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
+    return 0;
+  }
   ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
-                           Metaspace::class_space_list()->chunk_manager() :
-                           Metaspace::space_list()->chunk_manager();
+                        Metaspace::class_space_list()->chunk_manager() :
+                        Metaspace::space_list()->chunk_manager();
   chunk->slow_verify();
   return chunk->free_chunks_total();
 }
@@ -2615,7 +2628,6 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {

 // This is printed when PrintGCDetails
 void MetaspaceAux::print_on(outputStream* out) {
-  Metaspace::MetadataType ct = Metaspace::ClassType;
   Metaspace::MetadataType nct = Metaspace::NonClassType;

   out->print_cr(" Metaspace total "
@@ -2629,12 +2641,15 @@ void MetaspaceAux::print_on(outputStream* out) {
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
                 reserved_in_bytes(nct)/K);
-  out->print_cr(" class space "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(ct)/K,
-                allocated_used_bytes(ct)/K,
-                reserved_in_bytes(ct)/K);
+  if (Metaspace::using_class_space()) {
+    Metaspace::MetadataType ct = Metaspace::ClassType;
+    out->print_cr(" class space "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  allocated_capacity_bytes(ct)/K,
+                  allocated_used_bytes(ct)/K,
+                  reserved_in_bytes(ct)/K);
+  }
 }

 // Print information for class space and data space separately.
@@ -2659,13 +2674,37 @@ void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
 }

-// Print total fragmentation for class and data metaspaces separately
-void MetaspaceAux::print_waste(outputStream* out) {
-
-  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
-  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
+// Print total fragmentation for class metaspaces
+void MetaspaceAux::print_class_waste(outputStream* out) {
+  assert(Metaspace::using_class_space(), "class metaspace not used");
+  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
+  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
+  ClassLoaderDataGraphMetaspaceIterator iter;
+  while (iter.repeat()) {
+    Metaspace* msp = iter.get_next();
+    if (msp != NULL) {
+      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
+      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
+      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
+      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
+      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
+      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
+      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+    }
+  }
+  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+                "large count " SIZE_FORMAT,
+                cls_specialized_count, cls_specialized_waste,
+                cls_small_count, cls_small_waste,
+                cls_medium_count, cls_medium_waste, cls_humongous_count);
+}
+
+// Print total fragmentation for data and class metaspaces separately
+void MetaspaceAux::print_waste(outputStream* out) {
+  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
+  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;

   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
@@ -2678,14 +2717,6 @@ void MetaspaceAux::print_waste(outputStream* out) {
       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-
-      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
-      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
     }
   }
   out->print_cr("Total fragmentation waste (words) doesn't count free space");
@@ -2695,13 +2726,9 @@ void MetaspaceAux::print_waste(outputStream* out) {
                 "large count " SIZE_FORMAT,
                 specialized_count, specialized_waste, small_count,
                 small_waste, medium_count, medium_waste, humongous_count);
-  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-                "large count " SIZE_FORMAT,
-                cls_specialized_count, cls_specialized_waste,
-                cls_small_count, cls_small_waste,
-                cls_medium_count, cls_medium_waste, cls_humongous_count);
+  if (Metaspace::using_class_space()) {
+    print_class_waste(out);
+  }
 }

 // Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -2714,7 +2741,9 @@ void MetaspaceAux::dump(outputStream* out) {

 void MetaspaceAux::verify_free_chunks() {
   Metaspace::space_list()->chunk_manager()->verify();
-  Metaspace::class_space_list()->chunk_manager()->verify();
+  if (Metaspace::using_class_space()) {
+    Metaspace::class_space_list()->chunk_manager()->verify();
+  }
 }

 void MetaspaceAux::verify_capacity() {
@@ -2776,7 +2805,9 @@ Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {

 Metaspace::~Metaspace() {
   delete _vsm;
-  delete _class_vsm;
+  if (using_class_space()) {
+    delete _class_vsm;
+  }
 }

 VirtualSpaceList* Metaspace::_space_list = NULL;
@@ -2784,9 +2815,123 @@ VirtualSpaceList* Metaspace::_class_space_list = NULL;

 #define VIRTUALSPACEMULTIPLIER 2

+#ifdef _LP64
+void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
+  // narrow_klass_base is the lower of the metaspace base and the cds base
+  // (if cds is enabled).  The narrow_klass_shift depends on the distance
+  // between the lower base and higher address.
+  address lower_base;
+  address higher_address;
+  if (UseSharedSpaces) {
+    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                          (address)(metaspace_base + class_metaspace_size()));
+    lower_base = MIN2(metaspace_base, cds_base);
+  } else {
+    higher_address = metaspace_base + class_metaspace_size();
+    lower_base = metaspace_base;
+  }
+  Universe::set_narrow_klass_base(lower_base);
+  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+    Universe::set_narrow_klass_shift(0);
+  } else {
+    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
+    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  }
+}
+
+// Return TRUE if the specified metaspace_base and cds_base are close enough
+// to work with compressed klass pointers.
+bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
+  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  address lower_base = MIN2((address)metaspace_base, cds_base);
+  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                                (address)(metaspace_base + class_metaspace_size()));
+  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+}
+
+// Try to allocate the metaspace at the requested addr.
+void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+  assert(using_class_space(), "called improperly");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+         "Metaspace size is too big");
+
+  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                             os::vm_allocation_granularity(),
+                                             false, requested_addr, 0);
+  if (!metaspace_rs.is_reserved()) {
+    if (UseSharedSpaces) {
+      // Keep trying to allocate the metaspace, increasing the requested_addr
+      // by 1GB each time, until we reach an address that will no longer allow
+      // use of CDS with compressed klass pointers.
+      char *addr = requested_addr;
+      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
+             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
+        addr = addr + 1*G;
+        metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                     os::vm_allocation_granularity(), false, addr, 0);
+      }
+    }
+
+    // If no successful allocation then try to allocate the space anywhere.  If
+    // that fails then OOM doom.  At this point we cannot try allocating the
+    // metaspace as if UseCompressedKlassPointers is off because too much
+    // initialization has happened that depends on UseCompressedKlassPointers.
+    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    if (!metaspace_rs.is_reserved()) {
+      metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                   os::vm_allocation_granularity(), false);
+      if (!metaspace_rs.is_reserved()) {
+        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
+                                              class_metaspace_size()));
+      }
+    }
+  }
+
+  // If we got here then the metaspace got allocated.
+  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+
+  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
+  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+    FileMapInfo::stop_sharing_and_unmap(
+        "Could not allocate metaspace at a compatible address");
+  }
+
+  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+                                  UseSharedSpaces ? (address)cds_base : 0);
+
+  initialize_class_space(metaspace_rs);
+
+  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
+    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
+                           Universe::narrow_klass_base(), Universe::narrow_klass_shift());
+    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+  }
+}
+
+// For UseCompressedKlassPointers the class space is reserved above the top of
+// the Java heap.  The argument passed in is at the base of the compressed space.
+void Metaspace::initialize_class_space(ReservedSpace rs) {
+  // The reserved space size may be bigger because of alignment, esp with UseLargePages
+  assert(rs.size() >= ClassMetaspaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(using_class_space(), "Must be using class space");
+  _class_space_list = new VirtualSpaceList(rs);
+}
+
+#endif
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
+  size_t cds_total = 0;
+
+  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+                                         os::vm_allocation_granularity()));

   MetaspaceShared::set_max_alignment(max_alignment);

   if (DumpSharedSpaces) {
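The base/shift choice in set_narrow_klass_base_and_shift() above reduces to a small decision: take the lower of the class metaspace and the CDS archive as the base, and keep the shift at zero as long as the whole span is reachable as an unsigned 32-bit offset (the patch asserts that the shifted case never occurs together with CDS). A simplified standalone model, illustration only — the real code works on HotSpot address types:

// Simplified model (illustration only) of the base/shift decision.
#include <algorithm>
#include <cstdint>

struct NarrowKlassSetting { uint64_t base; unsigned shift; };

NarrowKlassSetting choose_base_and_shift(uint64_t metaspace_base, uint64_t metaspace_end,
                                         uint64_t cds_base, uint64_t cds_end,
                                         bool use_shared_spaces) {
  // base is the lower of the class metaspace and the CDS archive (if mapped)
  uint64_t lower  = use_shared_spaces ? std::min(metaspace_base, cds_base) : metaspace_base;
  uint64_t higher = use_shared_spaces ? std::max(metaspace_end, cds_end) : metaspace_end;
  if (higher - lower < UINT32_MAX) {
    return { lower, 0 };  // whole range reachable as an unscaled 32-bit offset
  }
  // must not happen with CDS; otherwise scale by LogKlassAlignmentInBytes
  // (assumed 3 here, i.e. 8-byte-aligned Klass*)
  return { lower, 3 };
}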
@ -2798,15 +2943,31 @@ void Metaspace::global_initialize() {
|
||||
// Initialize with the sum of the shared space sizes. The read-only
|
||||
// and read write metaspace chunks will be allocated out of this and the
|
||||
// remainder is the misc code and data chunks.
|
||||
size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
|
||||
SharedMiscDataSize + SharedMiscCodeSize,
|
||||
os::vm_allocation_granularity());
|
||||
size_t word_size = total/wordSize;
|
||||
_space_list = new VirtualSpaceList(word_size);
|
||||
cds_total = FileMapInfo::shared_spaces_size();
|
||||
_space_list = new VirtualSpaceList(cds_total/wordSize);
|
||||
|
||||
#ifdef _LP64
|
||||
// Set the compressed klass pointer base so that decoding of these pointers works
|
||||
// properly when creating the shared archive.
|
||||
assert(UseCompressedOops && UseCompressedKlassPointers,
|
||||
"UseCompressedOops and UseCompressedKlassPointers must be set");
|
||||
Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
|
||||
if (TraceMetavirtualspaceAllocation && Verbose) {
|
||||
gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
|
||||
_space_list->current_virtual_space()->bottom());
|
||||
}
|
||||
|
||||
// Set the shift to zero.
|
||||
assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
|
||||
"CDS region is too large");
|
||||
Universe::set_narrow_klass_shift(0);
|
||||
#endif
|
||||
|
||||
} else {
|
||||
// If using shared space, open the file that contains the shared space
|
||||
// and map in the memory before initializing the rest of metaspace (so
|
||||
// the addresses don't conflict)
|
||||
address cds_address = NULL;
|
||||
if (UseSharedSpaces) {
|
||||
FileMapInfo* mapinfo = new FileMapInfo();
|
||||
memset(mapinfo, 0, sizeof(FileMapInfo));
|
||||
@ -2821,8 +2982,22 @@ void Metaspace::global_initialize() {
|
||||
assert(!mapinfo->is_open() && !UseSharedSpaces,
|
||||
"archive file not closed or shared spaces not disabled.");
|
||||
}
|
||||
cds_total = FileMapInfo::shared_spaces_size();
|
||||
cds_address = (address)mapinfo->region_base(0);
|
||||
}
|
||||
|
||||
#ifdef _LP64
|
||||
// If UseCompressedKlassPointers is set then allocate the metaspace area
|
||||
// above the heap and above the CDS area (if it exists).
|
||||
if (using_class_space()) {
|
||||
if (UseSharedSpaces) {
|
||||
allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
|
||||
} else {
|
||||
allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Initialize these before initializing the VirtualSpaceList
|
||||
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
|
||||
_first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
@ -2840,39 +3015,28 @@ void Metaspace::global_initialize() {
}
}

// For UseCompressedKlassPointers the class space is reserved as a piece of the
// Java heap because the compression algorithm is the same for each. The
// argument passed in is at the top of the compressed space
void Metaspace::initialize_class_space(ReservedSpace rs) {
// The reserved space size may be bigger because of alignment, esp with UseLargePages
assert(rs.size() >= ClassMetaspaceSize,
err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
_class_space_list = new VirtualSpaceList(rs);
}

void Metaspace::initialize(Mutex* lock,
MetaspaceType type) {
void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

assert(space_list() != NULL,
"Metadata VirtualSpaceList has not been initialized");

_vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
_vsm = new SpaceManager(NonClassType, lock, space_list());
if (_vsm == NULL) {
return;
}
size_t word_size;
size_t class_word_size;
vsm()->get_initial_chunk_sizes(type,
&word_size,
&class_word_size);
vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

assert(class_space_list() != NULL,
"Class VirtualSpaceList has not been initialized");
if (using_class_space()) {
assert(class_space_list() != NULL,
"Class VirtualSpaceList has not been initialized");

// Allocate SpaceManager for classes.
_class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
if (_class_vsm == NULL) {
return;
// Allocate SpaceManager for classes.
_class_vsm = new SpaceManager(ClassType, lock, class_space_list());
if (_class_vsm == NULL) {
return;
}
}

MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
@ -2888,11 +3052,13 @@ void Metaspace::initialize(Mutex* lock,
}

// Allocate chunk for class metadata objects
Metachunk* class_chunk =
class_space_list()->get_initialization_chunk(class_word_size,
class_vsm()->medium_chunk_bunch());
if (class_chunk != NULL) {
class_vsm()->add_chunk(class_chunk, true);
if (using_class_space()) {
Metachunk* class_chunk =
class_space_list()->get_initialization_chunk(class_word_size,
class_vsm()->medium_chunk_bunch());
if (class_chunk != NULL) {
class_vsm()->add_chunk(class_chunk, true);
}
}

_alloc_record_head = NULL;
@ -2906,7 +3072,8 @@ size_t Metaspace::align_word_size_up(size_t word_size) {

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
if (mdtype == ClassType && !DumpSharedSpaces) {
// Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
if (mdtype == ClassType && using_class_space()) {
return class_vsm()->allocate(word_size);
} else {
return vsm()->allocate(word_size);
@ -2937,14 +3104,19 @@ char* Metaspace::bottom() const {
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
// return vsm()->allocated_used_words();
return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
vsm()->sum_used_in_chunks_in_use(); // includes overhead!
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
} else {
return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
}
}

size_t Metaspace::free_words(MetadataType mdtype) const {
return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
vsm()->sum_free_in_chunks_in_use();
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
} else {
return vsm()->sum_free_in_chunks_in_use();
}
}

// Space capacity in the Metaspace. It includes
@ -2953,8 +3125,11 @@ size_t Metaspace::free_words(MetadataType mdtype) const {
// in the space available in the dictionary which
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
vsm()->sum_capacity_in_chunks_in_use();
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
} else {
return vsm()->sum_capacity_in_chunks_in_use();
}
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
@ -2977,8 +3152,8 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
if (is_class) {
class_vsm()->deallocate(ptr, word_size);
if (is_class && using_class_space()) {
class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
}
@ -2992,7 +3167,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
if (is_class) {
if (is_class && using_class_space()) {
class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
@ -3101,14 +3276,18 @@ void Metaspace::purge() {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
space_list()->purge();
class_space_list()->purge();
if (using_class_space()) {
class_space_list()->purge();
}
}

void Metaspace::print_on(outputStream* out) const {
// Print both class virtual space counts and metaspace.
if (Verbose) {
vsm()->print_on(out);
vsm()->print_on(out);
if (using_class_space()) {
class_vsm()->print_on(out);
}
}
}

@ -3122,17 +3301,21 @@ bool Metaspace::contains(const void * ptr) {
// be needed. Note, locking this can cause inversion problems with the
// caller in MetaspaceObj::is_metadata() function.
return space_list()->contains(ptr) ||
class_space_list()->contains(ptr);
(using_class_space() && class_space_list()->contains(ptr));
}

void Metaspace::verify() {
vsm()->verify();
class_vsm()->verify();
if (using_class_space()) {
class_vsm()->verify();
}
}

void Metaspace::dump(outputStream* const out) const {
out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
vsm()->dump(out);
out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
class_vsm()->dump(out);
if (using_class_space()) {
out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
class_vsm()->dump(out);
}
}
@ -105,6 +105,16 @@ class Metaspace : public CHeapObj<mtClass> {
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);

// Aligned size of the metaspace.
static size_t _class_metaspace_size;

static size_t class_metaspace_size() {
return _class_metaspace_size;
}
static void set_class_metaspace_size(size_t metaspace_size) {
_class_metaspace_size = metaspace_size;
}

static size_t _first_chunk_word_size;
static size_t _first_class_chunk_word_size;

@ -131,6 +141,17 @@ class Metaspace : public CHeapObj<mtClass> {
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);

#ifdef _LP64
static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);

// Returns true if can use CDS with metaspace allocated as specified address.
static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);

static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);

static void initialize_class_space(ReservedSpace rs);
#endif

class AllocRecord : public CHeapObj<mtClass> {
public:
AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
@ -151,7 +172,6 @@ class Metaspace : public CHeapObj<mtClass> {

// Initialize globals for Metaspace
static void global_initialize();
static void initialize_class_space(ReservedSpace rs);

static size_t first_chunk_word_size() { return _first_chunk_word_size; }
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
@ -172,8 +192,6 @@ class Metaspace : public CHeapObj<mtClass> {
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);

static bool is_initialized() { return _class_space_list != NULL; }

static bool contains(const void *ptr);
void dump(outputStream* const out) const;

@ -190,6 +208,12 @@ class Metaspace : public CHeapObj<mtClass> {
};

void iterate(AllocRecordClosure *closure);

// Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
static bool using_class_space() {
return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
}

};
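The using_class_space() predicate above gates every class-space access this commit touches. A minimal sketch of the resulting guard idiom (simplified signatures, assumed names; not the VM's code):

#include <cstddef>

// Sketch: the used/free/capacity accessors all reduce to this shape.
size_t class_or_nonclass_words(bool is_class, bool class_space_in_use,
                               size_t class_words, size_t nonclass_words) {
  if (is_class) {
    // With no class space (32-bit VM, or -Xshare:dump) report zero
    // instead of dereferencing a space manager that was never built.
    return class_space_in_use ? class_words : 0;
  }
  return nonclass_words;
}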
class MetaspaceAux : AllStatic {
@ -243,8 +267,9 @@ class MetaspaceAux : AllStatic {
return _allocated_capacity_words[mdtype];
}
static size_t allocated_capacity_words() {
return _allocated_capacity_words[Metaspace::ClassType] +
_allocated_capacity_words[Metaspace::NonClassType];
return _allocated_capacity_words[Metaspace::NonClassType] +
(Metaspace::using_class_space() ?
_allocated_capacity_words[Metaspace::ClassType] : 0);
}
static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
return allocated_capacity_words(mdtype) * BytesPerWord;
@ -257,8 +282,9 @@ class MetaspaceAux : AllStatic {
return _allocated_used_words[mdtype];
}
static size_t allocated_used_words() {
return _allocated_used_words[Metaspace::ClassType] +
_allocated_used_words[Metaspace::NonClassType];
return _allocated_used_words[Metaspace::NonClassType] +
(Metaspace::using_class_space() ?
_allocated_used_words[Metaspace::ClassType] : 0);
}
static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
return allocated_used_words(mdtype) * BytesPerWord;
@ -300,6 +326,7 @@ class MetaspaceAux : AllStatic {
static void print_on(outputStream * out);
static void print_on(outputStream * out, Metaspace::MetadataType mdtype);

static void print_class_waste(outputStream* out);
static void print_waste(outputStream* out);
static void dump(outputStream* out);
static void verify_free_chunks();

@ -52,7 +52,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
int tag = 0;
soc->do_tag(--tag);

assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
// Verify the sizes of various metadata in the system.
soc->do_tag(sizeof(Method));
soc->do_tag(sizeof(ConstMethod));

@ -145,8 +145,6 @@ NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

size_t Universe::_class_metaspace_size;

void Universe::basic_type_classes_do(void f(Klass*)) {
f(boolArrayKlassObj());
f(byteArrayKlassObj());
@ -641,6 +639,8 @@ jint universe_init() {
return status;
}

Metaspace::global_initialize();

// Create memory for metadata. Must be after initializing heap for
// DumpSharedSpaces.
ClassLoaderData::init_null_class_loader_data();
@ -693,13 +693,9 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
base = HeapBaseMinAddress;

// If the total size and the metaspace size are small enough to allow
// UnscaledNarrowOop then just use UnscaledNarrowOop.
} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
(!UseCompressedKlassPointers ||
(((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
// We don't need to check the metaspace size here because it is always smaller
// than total_size.
// If the total size is small enough to allow UnscaledNarrowOop then
// just use UnscaledNarrowOop.
} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
@ -716,13 +712,6 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
base = (OopEncodingHeapMax - heap_size);
}
}

// See if ZeroBaseNarrowOop encoding will work for a heap based at
// (KlassEncodingMetaspaceMax - class_metaspace_size()).
} else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
(Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
(KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
} else {
// UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
// HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
@ -732,8 +721,7 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
// Set narrow_oop_base and narrow_oop_use_implicit_null_checks
// used in ReservedHeapSpace() constructors.
// The final values will be set in initialize_heap() below.
if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
(!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
// Use zero based compressed oops
Universe::set_narrow_oop_base(NULL);
// Don't need guard page for implicit checks in indexed
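For orientation, a standalone sketch (assumed constants, not VM code) of the three compressed-oop modes this function chooses among, using the usual 8-byte object alignment; the real code also conditions the unscaled case on the current shift being zero:

#include <cstdint>

enum NarrowOopMode { Unscaled, ZeroBased, HeapBased };

NarrowOopMode pick_mode(uint64_t heap_end) {
  const uint64_t NarrowOopHeapMax   = UINT64_C(1) << 32;      // 4 GB
  const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << 3;  // 32 GB (shift 3)
  if (heap_end <= NarrowOopHeapMax)   return Unscaled;   // 32-bit oop used as-is
  if (heap_end <= OopEncodingHeapMax) return ZeroBased;  // (oop << 3), NULL base
  return HeapBased;                                      // base + (oop << 3)
}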
@ -816,9 +804,7 @@ jint Universe::initialize_heap() {
tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
}
if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
(UseCompressedKlassPointers &&
((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
// Can't reserve heap below 32Gb.
// keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@ -849,20 +835,16 @@ jint Universe::initialize_heap() {
}
}
}

if (verbose) {
tty->cr();
tty->cr();
}
if (UseCompressedKlassPointers) {
Universe::set_narrow_klass_base(Universe::narrow_oop_base());
Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
}
Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
}
// Universe::narrow_oop_base() is one page below the metaspace
// base. The actual metaspace base depends on alignment constraints
// so we don't know its exact location here.
assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
// Universe::narrow_oop_base() is one page below the heap.
assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
os::vm_page_size()) ||
Universe::narrow_oop_base() == NULL, "invalid value");
assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
Universe::narrow_oop_shift() == 0, "invalid value");
@ -882,12 +864,7 @@ jint Universe::initialize_heap() {

// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
// Add in the class metaspace area so the classes in the headers can
// be compressed the same as instances.
// Need to round class space size up because it's below the heap and
// the actual alignment depends on its size.
Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
@ -923,28 +900,17 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
return total_rs;
}

// Split the reserved space into main Java heap and a space for
// classes so that they can be compressed using the same algorithm
// as compressed oops. If compress oops and compress klass ptrs are
// used we need the meta space first: if the alignment used for
// compressed oops is greater than the one used for compressed klass
// ptrs, a metadata space on top of the heap could become
// unreachable.
ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
Metaspace::initialize_class_space(class_rs);

if (UseCompressedOops) {
// Universe::initialize_heap() will reset this to NULL if unscaled
// or zero-based narrow oops are actually used.
address base = (address)(total_rs.base() - os::vm_page_size());
Universe::set_narrow_oop_base(base);
}
return heap_rs;
return total_rs;
}

// It's the caller's repsonsibility to ensure glitch-freedom
// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
_heap_capacity_at_last_gc = heap()->capacity();

@ -75,10 +75,10 @@ class LatestMethodCache : public CHeapObj<mtClass> {
};

// For UseCompressedOops and UseCompressedKlassPointers.
// For UseCompressedOops.
struct NarrowPtrStruct {
// Base address for oop/klass-within-java-object materialization.
// NULL if using wide oops/klasses or zero based narrow oops/klasses.
// Base address for oop-within-java-object materialization.
// NULL if using wide oops or zero based narrow oops.
address _base;
// Number of shift bits for encoding/decoding narrow ptrs.
// 0 if using wide ptrs or zero based unscaled narrow ptrs,
@ -106,6 +106,7 @@ class Universe: AllStatic {
friend class SystemDictionary;
friend class VMStructs;
friend class VM_PopulateDumpSharedSpace;
friend class Metaspace;

friend jint universe_init();
friend void universe2_init();
@ -184,9 +185,6 @@ class Universe: AllStatic {
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;

// Aligned size of the metaspace.
static size_t _class_metaspace_size;

// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
// index of next entry to clear
@ -238,15 +236,6 @@ class Universe: AllStatic {
assert(UseCompressedOops, "no compressed ptrs?");
_narrow_oop._use_implicit_null_checks = use;
}
static bool reserve_metaspace_helper(bool with_base = false);
static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous);

static size_t class_metaspace_size() {
return _class_metaspace_size;
}
static void set_class_metaspace_size(size_t metaspace_size) {
_class_metaspace_size = metaspace_size;
}

// Debugging
static int _verify_count; // number of verifies done
@ -703,6 +703,16 @@ class Klass : public Metadata {

virtual void oop_verify_on(oop obj, outputStream* st);

static bool is_null(narrowKlass obj);
static bool is_null(Klass* obj);

// klass encoding for klass pointer in objects.
static narrowKlass encode_klass_not_null(Klass* v);
static narrowKlass encode_klass(Klass* v);

static Klass* decode_klass_not_null(narrowKlass v);
static Klass* decode_klass(narrowKlass v);

private:
// barriers used by klass_oop_store
void klass_update_barrier_set(oop v);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_VM_OOPS_KLASS_INLINE_HPP
#define SHARE_VM_OOPS_KLASS_INLINE_HPP

#include "memory/universe.hpp"
#include "oops/klass.hpp"
#include "oops/markOop.hpp"

@ -33,4 +34,41 @@ inline void Klass::set_prototype_header(markOop header) {
_prototype_header = header;
}

inline bool Klass::is_null(Klass* obj) { return obj == NULL; }
inline bool Klass::is_null(narrowKlass obj) { return obj == 0; }

// Encoding and decoding for klass field.

inline bool check_klass_alignment(Klass* obj) {
return (intptr_t)obj % KlassAlignmentInBytes == 0;
}

inline narrowKlass Klass::encode_klass_not_null(Klass* v) {
assert(!is_null(v), "klass value can never be zero");
assert(check_klass_alignment(v), "Address not aligned");
int shift = Universe::narrow_klass_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, Universe::narrow_klass_base(), 1));
assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
assert(decode_klass(result) == v, "reversibility");
return (narrowKlass)result;
}

inline narrowKlass Klass::encode_klass(Klass* v) {
return is_null(v) ? (narrowKlass)0 : encode_klass_not_null(v);
}

inline Klass* Klass::decode_klass_not_null(narrowKlass v) {
assert(!is_null(v), "narrow klass value can never be zero");
int shift = Universe::narrow_klass_shift();
Klass* result = (Klass*)(void*)((uintptr_t)Universe::narrow_klass_base() + ((uintptr_t)v << shift));
assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
return result;
}

inline Klass* Klass::decode_klass(narrowKlass v) {
return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
}

#endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
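To make the arithmetic concrete, here is a standalone round-trip of the same base-plus-shift scheme (assumed values: base at 32 GB, shift 3, i.e. 8-byte klass alignment); a sketch, not the VM's code:

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint32_t narrowKlass;
const uint64_t kBase  = UINT64_C(0x800000000); // assumed klass base (32 GB)
const int      kShift = 3;                     // assumed 8-byte klass alignment

narrowKlass encode(uint64_t klass_addr) {
  uint64_t delta = klass_addr - kBase;
  assert((delta & ((UINT64_C(1) << kShift) - 1)) == 0); // must be aligned
  return (narrowKlass)(delta >> kShift);  // fits 32 bits for a 32 GB span
}
uint64_t decode(narrowKlass v) { return kBase + ((uint64_t)v << kShift); }

int main() {
  uint64_t k = kBase + 0x12340;            // some aligned klass address
  assert(decode(encode(k)) == k);          // reversibility, as asserted above
  printf("0x%x\n", encode(k));
}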
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ class oopDesc {
volatile markOop _mark;
union _metadata {
Klass* _klass;
narrowOop _compressed_klass;
narrowKlass _compressed_klass;
} _metadata;

// Fast access to barrier set. Must be initialized.
@ -84,7 +84,7 @@ class oopDesc {
Klass* klass() const;
Klass* klass_or_null() const volatile;
Klass** klass_addr();
narrowOop* compressed_klass_addr();
narrowKlass* compressed_klass_addr();

void set_klass(Klass* k);

@ -189,13 +189,6 @@ class oopDesc {
oop compare_value,
bool prebarrier = false);

// klass encoding for klass pointer in objects.
static narrowOop encode_klass_not_null(Klass* v);
static narrowOop encode_klass(Klass* v);

static Klass* decode_klass_not_null(narrowOop v);
static Klass* decode_klass(narrowOop v);

// Access to fields in a instanceOop through these methods.
oop obj_field(int offset) const;
volatile oop obj_field_volatile(int offset) const;

@ -35,7 +35,7 @@
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
@ -70,7 +70,7 @@ inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {

inline Klass* oopDesc::klass() const {
if (UseCompressedKlassPointers) {
return decode_klass_not_null(_metadata._compressed_klass);
return Klass::decode_klass_not_null(_metadata._compressed_klass);
} else {
return _metadata._klass;
}
@ -79,7 +79,7 @@ inline Klass* oopDesc::klass() const {
inline Klass* oopDesc::klass_or_null() const volatile {
// can be NULL in CMS
if (UseCompressedKlassPointers) {
return decode_klass(_metadata._compressed_klass);
return Klass::decode_klass(_metadata._compressed_klass);
} else {
return _metadata._klass;
}
@ -87,7 +87,7 @@ inline Klass* oopDesc::klass_or_null() const volatile {

inline int oopDesc::klass_gap_offset_in_bytes() {
assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
}

inline Klass** oopDesc::klass_addr() {
@ -97,9 +97,9 @@ inline Klass** oopDesc::klass_addr() {
return (Klass**) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
inline narrowKlass* oopDesc::compressed_klass_addr() {
assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
return (narrowOop*) &_metadata._compressed_klass;
return &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(Klass* k) {
@ -107,7 +107,7 @@ inline void oopDesc::set_klass(Klass* k) {
assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
if (UseCompressedKlassPointers) {
*compressed_klass_addr() = encode_klass_not_null(k);
*compressed_klass_addr() = Klass::encode_klass_not_null(k);
} else {
*klass_addr() = k;
}
@ -127,7 +127,7 @@ inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
if (UseCompressedKlassPointers) {
_metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling)
_metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling)
} else {
_metadata._klass = (Klass*)(address)k;
}
@ -136,7 +136,7 @@ inline void oopDesc::set_klass_to_list_ptr(oop k) {
inline oop oopDesc::list_ptr_from_klass() {
// This is only to be used during GC, for from-space objects.
if (UseCompressedKlassPointers) {
return decode_heap_oop(_metadata._compressed_klass);
return decode_heap_oop((narrowOop)_metadata._compressed_klass);
} else {
// Special case for GC
return (oop)(address)_metadata._klass;
@ -176,7 +176,6 @@ inline address* oopDesc::address_field_addr(int offset) const { return (address
// the right type and inlines the appopriate code).

inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
inline bool oopDesc::is_null(Klass* obj) { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
@ -186,9 +185,6 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
inline bool check_obj_alignment(oop obj) {
return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}
inline bool check_klass_alignment(Klass* obj) {
return (intptr_t)obj % KlassAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
@ -224,39 +220,6 @@ inline oop oopDesc::decode_heap_oop(narrowOop v) {
inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v) { return v; }

// Encoding and decoding for klass field. It is copied code, but someday
// might not be the same as oop.

inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
assert(!is_null(v), "klass value can never be zero");
assert(check_klass_alignment(v), "Address not aligned");
address base = Universe::narrow_klass_base();
int shift = Universe::narrow_klass_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
assert(decode_klass(result) == v, "reversibility");
return (narrowOop)result;
}

inline narrowOop oopDesc::encode_klass(Klass* v) {
return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
}

inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_klass_base();
int shift = Universe::narrow_klass_shift();
Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
return result;
}

inline Klass* oopDesc::decode_klass(narrowOop v) {
return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
}

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop oopDesc::load_heap_oop(oop* p) { return *p; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,10 @@
// of B, A's representation is a prefix of B's representation.

typedef juint narrowOop; // Offset instead of address for an oop within a java object

// If compressed klass pointers then use narrowKlass.
typedef juint narrowKlass;

typedef void* OopOrNarrowOopStar;
typedef class markOopDesc* markOop;

@ -1393,10 +1393,8 @@ bool verify_object_alignment() {

inline uintx max_heap_for_compressed_oops() {
// Avoid sign flip.
if (OopEncodingHeapMax < ClassMetaspaceSize + os::vm_page_size()) {
return 0;
}
LP64_ONLY(return OopEncodingHeapMax - ClassMetaspaceSize - os::vm_page_size());
assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
NOT_LP64(ShouldNotReachHere(); return 0);
}
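The arithmetic behind this change: with the default 8-byte object alignment, OopEncodingHeapMax is 4 GB << 3 = 32 GB, so the compressed-oops heap limit becomes 32 GB minus one page instead of also subtracting ClassMetaspaceSize. A quick check (sketch, assuming 4 KB pages):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t NarrowOopHeapMax   = UINT64_C(1) << 32;     // 4 GB
  const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << 3; // 32 GB
  const uint64_t page               = 4096;                  // assumed page size
  printf("max compressed-oops heap: %llu bytes\n",
         (unsigned long long)(OopEncodingHeapMax - page));   // 34359734272
}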
@ -1448,6 +1446,35 @@ void Arguments::set_use_compressed_oops() {
#endif // ZERO
}

// NOTE: set_use_compressed_klass_ptrs() must be called after calling
// set_use_compressed_oops().
void Arguments::set_use_compressed_klass_ptrs() {
#ifndef ZERO
#ifdef _LP64
// UseCompressedOops must be on for UseCompressedKlassPointers to be on.
if (!UseCompressedOops) {
if (UseCompressedKlassPointers) {
warning("UseCompressedKlassPointers requires UseCompressedOops");
}
FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
} else {
// Turn on UseCompressedKlassPointers too
if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
}
// Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
if (UseCompressedKlassPointers) {
if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
warning("Class metaspace size is too large for UseCompressedKlassPointers");
FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
}
}
}
#endif // _LP64
#endif // !ZERO
}
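A compact way to read this ergonomic rule, as a standalone sketch (assumed names, not the VM's implementation): klass-pointer compression follows oop compression, and is vetoed when the class metaspace could not be covered by the 32-bit-plus-shift encoding.

#include <cstdint>
#include <cstdio>

bool klass_ptrs_compressed(bool compressed_oops, uint64_t class_metaspace_size) {
  const uint64_t KlassEncodingMetaspaceMax = (UINT64_C(1) << 32) << 3; // 32 GB
  if (!compressed_oops) return false;  // hard dependency on compressed oops
  return class_metaspace_size <= KlassEncodingMetaspaceMax;
}

int main() {
  printf("%d\n", klass_ptrs_compressed(true,  UINT64_C(1) << 30)); // 1
  printf("%d\n", klass_ptrs_compressed(false, UINT64_C(1) << 30)); // 0
}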

void Arguments::set_ergonomics_flags() {

if (os::is_server_class_machine()) {
@ -1470,7 +1497,8 @@ void Arguments::set_ergonomics_flags() {
// server performance. On server class machines, keep the default
// off unless it is asked for. Future work: either add bytecode rewriting
// at link time, or rewrite bytecodes in non-shared methods.
if (!DumpSharedSpaces && !RequireSharedSpaces) {
if (!DumpSharedSpaces && !RequireSharedSpaces &&
(FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
no_shared_spaces();
}
}
@ -1478,33 +1506,11 @@ void Arguments::set_ergonomics_flags() {
#ifndef ZERO
#ifdef _LP64
set_use_compressed_oops();
// UseCompressedOops must be on for UseCompressedKlassPointers to be on.
if (!UseCompressedOops) {
if (UseCompressedKlassPointers) {
warning("UseCompressedKlassPointers requires UseCompressedOops");
}
FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
} else {
// Turn on UseCompressedKlassPointers too
if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
}
// Set the ClassMetaspaceSize to something that will not need to be
// expanded, since it cannot be expanded.
if (UseCompressedKlassPointers) {
if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
warning("Class metaspace size is too large for UseCompressedKlassPointers");
FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
} else if (FLAG_IS_DEFAULT(ClassMetaspaceSize)) {
// 100,000 classes seems like a good size, so 100M assumes around 1K
// per klass. The vtable and oopMap is embedded so we don't have a fixed
// size per klass. Eventually, this will be parameterized because it
// would also be useful to determine the optimal size of the
// systemDictionary.
FLAG_SET_ERGO(uintx, ClassMetaspaceSize, 100*M);
}
}
}

// set_use_compressed_klass_ptrs() must be called after calling
// set_use_compressed_oops().
set_use_compressed_klass_ptrs();

// Also checks that certain machines are slower with compressed oops
// in vm_version initialization code.
#endif // _LP64
@ -2153,7 +2159,7 @@ bool Arguments::check_vm_args_consistency() {

status = status && verify_object_alignment();

status = status && verify_min_value(ClassMetaspaceSize, 1*M,
status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
"ClassMetaspaceSize");

status = status && verify_interval(MarkStackSizeMax,
@ -3273,33 +3279,22 @@ jint Arguments::parse_options_environment_variable(const char* name, SysClassPat
}

void Arguments::set_shared_spaces_flags() {
#ifdef _LP64
const bool must_share = DumpSharedSpaces || RequireSharedSpaces;

// CompressedOops cannot be used with CDS. The offsets of oopmaps and
// static fields are incorrect in the archive. With some more clever
// initialization, this restriction can probably be lifted.
if (UseCompressedOops) {
if (must_share) {
warning("disabling compressed oops because of %s",
DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
FLAG_SET_CMDLINE(bool, UseCompressedOops, false);
FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false);
} else {
// Prefer compressed oops to class data sharing
if (UseSharedSpaces && Verbose) {
warning("turning off use of shared archive because of compressed oops");
}
no_shared_spaces();
}
}
#endif

if (DumpSharedSpaces) {
if (RequireSharedSpaces) {
warning("cannot dump shared archive while using shared archive");
}
UseSharedSpaces = false;
#ifdef _LP64
if (!UseCompressedOops || !UseCompressedKlassPointers) {
vm_exit_during_initialization(
"Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
}
} else {
// UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
if (!UseCompressedOops || !UseCompressedKlassPointers) {
no_shared_spaces();
}
#endif
}
}

@ -309,6 +309,7 @@ class Arguments : AllStatic {
static void set_g1_gc_flags();
// GC ergonomics
static void set_use_compressed_oops();
static void set_use_compressed_klass_ptrs();
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
// limits the given memory size by the maximum amount of memory this process is

@ -3036,7 +3036,7 @@ class CommandLineFlags {
product(uintx, MaxMetaspaceSize, max_uintx, \
"Maximum size of Metaspaces (in bytes)") \
\
product(uintx, ClassMetaspaceSize, 2*M, \
product(uintx, ClassMetaspaceSize, 1*G, \
"Maximum size of InstanceKlass area in Metaspace used for " \
"UseCompressedKlassPointers") \
\

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,7 +95,6 @@ jint init_globals() {
management_init();
bytecodes_init();
classLoader_init();
Metaspace::global_initialize(); // must be before codeCache
codeCache_init();
VM_Version_init();
os_init_globals();

@ -362,6 +362,8 @@ const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize;
// Klass encoding metaspace max size
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;

const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G

// Machine dependent stuff

#ifdef TARGET_ARCH_x86
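As a sanity check on the two constants above: 0x800000000 is exactly 32 GB, which equals (max_juint + 1) << 3 assuming the usual 8-byte klass alignment (shift 3), i.e. the full span reachable by a 32-bit klass offset. A throwaway verification (sketch):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t base = UINT64_C(0x800000000);            // CompressedKlassPointersBase
  const uint64_t kMax = (UINT64_C(0xffffffff) + 1) << 3;  // KlassEncodingMetaspaceMax
  printf("%llu GB, equal: %d\n",
         (unsigned long long)(base >> 30), base == kMax); // "32 GB, equal: 1"
}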
@ -0,0 +1,61 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8003424
* @summary Testing UseCompressedKlassPointers with CDS
* @library /testlibrary
* @run main CDSCompressedKPtrs
*/

import com.oracle.java.testlibrary.*;

public class CDSCompressedKPtrs {
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
if (Platform.is64bit()) {
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
output.shouldHaveExitValue(0);

} catch (RuntimeException e) {
// Report 'passed' if CDS was turned off because we could not allocate
// the klass metaspace at an address that would work with CDS.
output.shouldContain("Could not allocate metaspace at a compatible address");
output.shouldHaveExitValue(1);
}
}
}
}
@ -0,0 +1,93 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8003424
* @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
* @library /testlibrary
* @run main CDSCompressedKPtrsError
*/

import com.oracle.java.testlibrary.*;

public class CDSCompressedKPtrsError {
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
if (Platform.is64bit()) {
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(0);

} catch (RuntimeException e) {
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(1);
}

// Test bad options with -Xshare:dump.
pb = ProcessTools.createJavaProcessBuilder(
"-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");

pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");

pb = ProcessTools.createJavaProcessBuilder(
"-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Cannot dump shared archive");

}
}
}
hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java (new file, 76 lines)
@ -0,0 +1,76 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/*
* @test
* @bug 8005933
* @summary Test that -Xshare:auto uses CDS when explicitly specified with -server.
* @library /testlibrary
* @run main XShareAuto
*/

import com.oracle.java.testlibrary.*;

public class XShareAuto {
public static void main(String[] args) throws Exception {
if (!Platform.is64bit()) {
System.out.println("ObjectAlignmentInBytes for CDS is only " +
"supported on 64bit platforms; this plaform is " +
System.getProperty("sun.arch.data.model"));
System.out.println("Skipping the test");
return;
}
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
"-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-server", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldNotContain("sharing");
output.shouldHaveExitValue(0);

pb = ProcessTools.createJavaProcessBuilder(
"-server", "-Xshare:auto", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-version");
output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
} catch (RuntimeException e) {
// If this failed then check that it would also be unable
// to share even if -Xshare:on is specified. If so, then
// return a success status.
pb = ProcessTools.createJavaProcessBuilder(
"-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./sample.jsa", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("Could not allocate metaspace at a compatible address");
output.shouldHaveExitValue(1);
}
}
}
@ -84,8 +84,7 @@ public class CdsSameObjectAlignment {
// there is a chance such reservation will fail
// If it does, it is NOT considered a failure of the feature,
// rather a possible expected outcome, though not likely
output.shouldContain(
"Unable to reserve shared space at required address");
output.shouldContain("Could not allocate metaspace at a compatible address");
output.shouldHaveExitValue(1);
}
}