Merge
commit 7c43a226b4
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
@@ -1627,7 +1628,7 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
// Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
// Also, first reinitialize vtable because it may have gotten out of synch
// while the new class wasn't connected to the class hierarchy.
Universe::flush_dependents_on(k);
CodeCache::flush_dependents_on(k);
}

// ----------------------------------------------------------------------------
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1005,6 +1005,117 @@ void CodeCache::make_marked_nmethods_not_entrant() {
}
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
assert_lock_strong(Compile_lock);

if (number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

KlassDepChange changes(dependee);

// Compute the dependent nmethods
if (mark_for_deoptimization(changes) > 0) {
// At least one nmethod has been marked for deoptimization
VM_Deoptimize op;
VMThread::execute(&op);
}
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
assert_lock_strong(Compile_lock);

if (number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

CallSiteDepChange changes(call_site(), method_handle());

// Compute the dependent nmethods that have a reference to a
// CallSite object. We use InstanceKlass::mark_dependent_nmethods
// directly instead of CodeCache::mark_for_deoptimization because we
// want dependents on the call site class only not all classes in
// the ContextStream.
int marked = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
marked = call_site_klass->mark_dependent_nmethods(changes);
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization
VM_Deoptimize op;
VMThread::execute(&op);
}
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
// --- Compile_lock is not held. However we are at a safepoint.
assert_locked_or_safepoint(Compile_lock);
if (number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

// Compute the dependent nmethods
if (mark_for_evol_deoptimization(ev_k_h) > 0) {
// At least one nmethod has been marked for deoptimization

// All this already happens inside a VM_Operation, so we'll do all the work here.
// Stuff copied from VM_Deoptimize and modified slightly.

// We do not want any GCs to happen while we are in the middle of this VM operation
ResourceMark rm;
DeoptimizationMarker dm;

// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();

// Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
make_marked_nmethods_not_entrant();
}
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
// --- Compile_lock is not held. However we are at a safepoint.
assert_locked_or_safepoint(Compile_lock);

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

// Compute the dependent nmethods
if (mark_for_deoptimization(m_h()) > 0) {
// At least one nmethod has been marked for deoptimization

// All this already happens inside a VM_Operation, so we'll do all the work here.
// Stuff copied from VM_Deoptimize and modified slightly.

// We do not want any GCs to happen while we are in the middle of this VM operation
ResourceMark rm;
DeoptimizationMarker dm;

// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();

// Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
make_marked_nmethods_not_entrant();
}
}

void CodeCache::verify() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_HEAPS(heap) {
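Editor's note: the flush_dependents_on overloads moved into CodeCache above all share one protocol: first mark the affected nmethods while holding the appropriate lock, then run a single batched VM_Deoptimize operation only if anything was marked. Below is a minimal standalone sketch of that mark-then-act pattern; the NMethod type and function names are invented for illustration and are not HotSpot APIs.

#include <iostream>
#include <vector>

// Hypothetical stand-in for a compiled method with a class dependency.
struct NMethod {
  int dependee_id;      // class this compiled method depends on
  bool marked = false;  // marked for deoptimization?
};

// Phase 1: mark every compiled method whose dependency is affected.
// Returns the number marked, mirroring mark_for_deoptimization().
static int mark_for_deoptimization(std::vector<NMethod>& code_cache, int changed_id) {
  int marked = 0;
  for (NMethod& nm : code_cache) {
    if (nm.dependee_id == changed_id && !nm.marked) {
      nm.marked = true;
      ++marked;
    }
  }
  return marked;
}

// Phase 2: act once over everything marked (the VM_Deoptimize analogue).
static void deoptimize_marked(std::vector<NMethod>& code_cache) {
  for (NMethod& nm : code_cache) {
    if (nm.marked) { /* make not-entrant / zombie here */ nm.marked = false; }
  }
}

int main() {
  std::vector<NMethod> cache = {{1}, {2}, {1}};
  if (mark_for_deoptimization(cache, 1) > 0) {  // at least one method marked
    deoptimize_marked(cache);                   // single batched operation
  }
  std::cout << "flushed dependents of class 1\n";
}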
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -209,16 +209,28 @@ class CodeCache : AllStatic {
static void verify_icholder_relocations();

// Deoptimization
private:
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
static int mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

public:
static void mark_all_nmethods_for_deoptimization();
static int mark_for_deoptimization(Method* dependee);
static void make_marked_nmethods_zombies();
static void make_marked_nmethods_not_entrant();

// Flushing and deoptimization
static void flush_dependents_on(instanceKlassHandle dependee);
static void flush_dependents_on(Handle call_site, Handle method_handle);
#ifdef HOTSWAP
// Flushing and deoptimization in case of evolution
static void flush_evol_dependents_on(instanceKlassHandle dependee);
#endif // HOTSWAP
// Support for fullspeed debugging
static void flush_dependents_on_method(methodHandle dependee);

// tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies();
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -422,7 +422,7 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(
bool large_pages = false; // No large pages when dumping the CDS archive.
char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
if (_rs.is_reserved()) {
assert(shared_base == 0 || _rs.base() == shared_base, "should match");
} else {
@@ -3025,7 +3025,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
requested_addr, 0);
requested_addr);
if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
if (UseSharedSpaces) {
@@ -3039,7 +3039,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
addr = addr + increment;
metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment, large_pages, addr, 0);
_reserve_alignment, large_pages, addr);
}
}
#endif
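Editor's note: the metaspace and heap code in this changeset leans heavily on align_ptr_up/align_size_up to round addresses and sizes to a reservation alignment. A minimal standalone sketch of that rounding arithmetic, using my own helper names rather than the HotSpot ones:

#include <cstdint>
#include <cstdio>

// Round x up/down to a power-of-two alignment: the same arithmetic that
// HotSpot's align_size_up()/align_ptr_up()/align_ptr_down() helpers perform.
static uint64_t align_up(uint64_t x, uint64_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}
static uint64_t align_down(uint64_t x, uint64_t alignment) {
  return x & ~(alignment - 1);
}

int main() {
  const uint64_t alignment = 0x1000;  // e.g. a 4K reservation granularity
  printf("%llx\n", (unsigned long long)align_up(0x12345, alignment));    // 13000
  printf("%llx\n", (unsigned long long)align_down(0x12345, alignment));  // 12000
}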
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -694,103 +694,6 @@ jint universe_init() {
// NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;

char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
assert(is_size_aligned(heap_size, alignment), "Must be");

uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);

size_t base = 0;
#ifdef _LP64
if (UseCompressedOops) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
const size_t total_size = heap_size + heap_base_min_address_aligned;
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
base = heap_base_min_address_aligned;

// If the total size is small enough to allow UnscaledNarrowOop then
// just use UnscaledNarrowOop.
} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
// place heap's top on the 4Gb boundary
base = (UnscaledOopHeapMax - heap_size);
} else {
// Can't reserve with NarrowOopShift == 0
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

if (mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {

// Use zero based compressed oops with encoding and
// place heap's top on the 32Gb boundary in case
// total_size > 4Gb or failed to reserve below 4Gb.
uint64_t heap_top = OopEncodingHeapMax;

// For small heaps, save some space for compressed class pointer
// space so it can be decoded with no base.
if (UseCompressedClassPointers && !UseSharedSpaces &&
OopEncodingHeapMax <= 32*G) {

uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
alignment), "difference must be aligned too");
uint64_t new_top = OopEncodingHeapMax-class_space;

if (total_size <= new_top) {
heap_top = new_top;
}
}

// Align base to the adjusted top of the heap
base = heap_top - heap_size;
}
}
} else {
// UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
// HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
}

// Set narrow_oop_base and narrow_oop_use_implicit_null_checks
// used in ReservedHeapSpace() constructors.
// The final values will be set in initialize_heap() below.
if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
// Use zero based compressed oops
Universe::set_narrow_oop_base(NULL);
// Don't need guard page for implicit checks in indexed
// addressing mode with zero based Compressed Oops.
Universe::set_narrow_oop_use_implicit_null_checks(true);
} else {
// Set to a non-NULL value so the ReservedSpace ctor computes
// the correct no-access prefix.
// The final value will be set in initialize_heap() below.
Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
#if defined(_WIN64) || defined(AIX)
if (UseLargePages) {
// Cannot allocate guard pages for implicit checks in indexed
// addressing mode when large pages are specified on windows.
Universe::set_narrow_oop_use_implicit_null_checks(false);
}
#endif //  _WIN64
}
}
#endif

assert(is_ptr_aligned((char*)base, alignment), "Must be");
return (char*)base; // also return NULL (don't care) for 32-bit VM
}
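Editor's note: the deleted preferred_heap_base() above (its job moves into ReservedHeapSpace in virtualspace.cpp later in this diff) chooses among the compressed-oop modes named in the leading comment. The decode rule behind the 4Gb and 32Gb thresholds is oop = base + (narrow_oop << shift). A standalone sketch of that arithmetic, with illustrative constants only:

#include <cassert>
#include <cstdint>

// Decode rule shared by all compressed-oop modes: oop = base + (narrowOop << shift).
// With 8-byte object alignment the shift is 3, so a 32-bit narrow oop can span
// 4GB unscaled or 32GB zero-based, matching the comment above.
static const uint64_t kUnscaledOopHeapMax = uint64_t(1) << 32;         // 4GB
static const uint64_t kOopEncodingHeapMax = kUnscaledOopHeapMax << 3;  // 32GB

static uint64_t decode(uint32_t narrow_oop, uint64_t base, unsigned shift) {
  return base + (uint64_t(narrow_oop) << shift);
}

int main() {
  // Unscaled: heap below 4GB, base == 0, shift == 0 -> the narrow oop is the address.
  assert(decode(0x1000, 0, 0) == 0x1000);
  // Zero based: heap below 32GB, base == 0, shift == 3.
  assert(decode(0x1000, 0, 3) == 0x8000);
  // Heap based: heap cannot sit below 32GB; a non-zero base is added back on decode.
  const uint64_t base = kOopEncodingHeapMax;  // example base at the 32GB mark
  assert(decode(0x1000, base, 3) == base + 0x8000);
  return 0;
}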

jint Universe::initialize_heap() {

if (UseParallelGC) {
@@ -844,30 +747,13 @@ jint Universe::initialize_heap() {
// See needs_explicit_null_check.
// Only set the heap base for compressed oops because it indicates
// compressed oops for pstack code.
if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
// Can't reserve heap below 32Gb.
// keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
// Didn't reserve heap below 4Gb. Must shift.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
#ifdef AIX
// There is no protected page before the heap. This assures all oops
// are decoded so that NULL is preserved, so this page will not be accessed.
Universe::set_narrow_oop_use_implicit_null_checks(false);
#endif
} else {
}
if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
// Did reserve heap below 32Gb. Can use base == 0;
Universe::set_narrow_oop_base(0);
#ifdef _WIN64
if (!Universe::narrow_oop_use_implicit_null_checks()) {
// Don't need guard page for implicit checks in indexed addressing
// mode with zero based Compressed Oops.
Universe::set_narrow_oop_use_implicit_null_checks(true);
}
#endif //  _WIN64
if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
// Can't reserve heap below 4Gb.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
} else {
Universe::set_narrow_oop_shift(0);
}
}

Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
@@ -875,6 +761,11 @@ jint Universe::initialize_heap() {
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
Universe::print_compressed_oops_mode();
}

// Tell tests in which mode we run.
Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
narrow_oop_mode_to_string(narrow_oop_mode()),
false));
}
// Universe::narrow_oop_base() is one page below the heap.
assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
@@ -903,22 +794,27 @@ void Universe::print_compressed_oops_mode() {
tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));

if (Universe::narrow_oop_base() != 0) {
tty->print(":" PTR_FORMAT, Universe::narrow_oop_base());
tty->print(": " PTR_FORMAT, Universe::narrow_oop_base());
}

if (Universe::narrow_oop_shift() != 0) {
tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
}

if (!Universe::narrow_oop_use_implicit_null_checks()) {
tty->print(", no protected page in front of the heap");
}

tty->cr();
tty->cr();
}

// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

assert(alignment <= Arguments::conservative_max_heap_alignment(),
err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
alignment, Arguments::conservative_max_heap_alignment()));

size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
@@ -928,46 +824,31 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|| UseParallelGC
|| use_large_pages, "Wrong alignment to use large pages");

char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
// Now create the space.
ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages);

ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
if (total_rs.is_reserved()) {
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
"must be exactly of required size and alignment");
// We are good.

if (UseCompressedOops) {
if (addr != NULL && !total_rs.is_reserved()) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve heap higher.
addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

ReservedHeapSpace total_rs0(total_reserved, alignment,
use_large_pages, addr);

if (addr != NULL && !total_rs0.is_reserved()) {
// Failed to reserve at specified address again - give up.
addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
assert(addr == NULL, "");

ReservedHeapSpace total_rs1(total_reserved, alignment,
use_large_pages, addr);
total_rs = total_rs1;
} else {
total_rs = total_rs0;
}
if (UseCompressedOops) {
// Universe::initialize_heap() will reset this to NULL if unscaled
// or zero-based narrow oops are actually used.
// Else heap start and base MUST differ, so that NULL can be encoded unambiguously.
Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
}
}

if (!total_rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
return total_rs;
}

if (UseCompressedOops) {
// Universe::initialize_heap() will reset this to NULL if unscaled
// or zero-based narrow oops are actually used.
address base = (address)(total_rs.base() - os::vm_page_size());
Universe::set_narrow_oop_base(base);
}
return total_rs;
vm_exit_during_initialization(
err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
total_reserved/K));

// satisfy compiler
ShouldNotReachHere();
return ReservedHeapSpace(0, 0, false);
}

@@ -985,6 +866,8 @@ const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode)
return "32-bit";
case ZeroBasedNarrowOop:
return "Zero based";
case DisjointBaseNarrowOop:
return "Non-zero disjoint base";
case HeapBasedNarrowOop:
return "Non-zero based";
}
@@ -995,6 +878,10 @@ const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode)

Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
if (narrow_oop_base_disjoint()) {
return DisjointBaseNarrowOop;
}

if (narrow_oop_base() != 0) {
return HeapBasedNarrowOop;
}
@@ -1187,119 +1074,6 @@ void Universe::compute_base_vtable_size() {
}

// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
assert_lock_strong(Compile_lock);

if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

KlassDepChange changes(dependee);

// Compute the dependent nmethods
if (CodeCache::mark_for_deoptimization(changes) > 0) {
// At least one nmethod has been marked for deoptimization
VM_Deoptimize op;
VMThread::execute(&op);
}
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
assert_lock_strong(Compile_lock);

if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

CallSiteDepChange changes(call_site(), method_handle());

// Compute the dependent nmethods that have a reference to a
// CallSite object. We use InstanceKlass::mark_dependent_nmethods
// directly instead of CodeCache::mark_for_deoptimization because we
// want dependents on the call site class only not all classes in
// the ContextStream.
int marked = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
marked = call_site_klass->mark_dependent_nmethods(changes);
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization
VM_Deoptimize op;
VMThread::execute(&op);
}
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
// --- Compile_lock is not held. However we are at a safepoint.
assert_locked_or_safepoint(Compile_lock);
if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

// Compute the dependent nmethods
if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
// At least one nmethod has been marked for deoptimization

// All this already happens inside a VM_Operation, so we'll do all the work here.
// Stuff copied from VM_Deoptimize and modified slightly.

// We do not want any GCs to happen while we are in the middle of this VM operation
ResourceMark rm;
DeoptimizationMarker dm;

// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();

// Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
CodeCache::make_marked_nmethods_not_entrant();
}
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
// --- Compile_lock is not held. However we are at a safepoint.
assert_locked_or_safepoint(Compile_lock);

// CodeCache can only be updated by a thread_in_VM and they will all be
// stopped during the safepoint so CodeCache will be safe to update without
// holding the CodeCache_lock.

// Compute the dependent nmethods
if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
// At least one nmethod has been marked for deoptimization

// All this already happens inside a VM_Operation, so we'll do all the work here.
// Stuff copied from VM_Deoptimize and modified slightly.

// We do not want any GCs to happen while we are in the middle of this VM operation
ResourceMark rm;
DeoptimizationMarker dm;

// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();

// Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
CodeCache::make_marked_nmethods_not_entrant();
}
}

void Universe::print() {
print_on(gclog_or_tty);
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,8 +102,8 @@ class Universe: AllStatic {
friend class MarkSweep;
friend class oopDesc;
friend class ClassLoader;
friend class Arguments;
friend class SystemDictionary;
friend class ReservedHeapSpace;
friend class VMStructs;
friend class VM_PopulateDumpSharedSpace;
friend class Metaspace;
@@ -351,17 +351,40 @@ class Universe: AllStatic {
// NarrowOopHeapBaseMin + heap_size < 4Gb
// 1 - Use zero based compressed oops with encoding when
// NarrowOopHeapBaseMin + heap_size < 32Gb
// 2 - Use compressed oops with heap base + encoding.
// 2 - Use compressed oops with disjoint heap base if
// base is 32G-aligned and base > 0. This allows certain
// optimizations in encoding/decoding.
// Disjoint: Bits used in base are disjoint from bits used
// for oops ==> oop = (cOop << 3) | base. One can split
// the bits of an oop into base and compressed oop.
// 3 - Use compressed oops with heap base + encoding.
enum NARROW_OOP_MODE {
UnscaledNarrowOop = 0,
ZeroBasedNarrowOop = 1,
HeapBasedNarrowOop = 2
DisjointBaseNarrowOop = 2,
HeapBasedNarrowOop = 3,
AnyNarrowOopMode = 4
};
static NARROW_OOP_MODE narrow_oop_mode();
static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
static address narrow_oop_base() { return _narrow_oop._base; }
// Test whether bits of addr and possible offsets into the heap overlap.
static bool is_disjoint_heap_base_address(address addr) {
return (((uint64_t)(intptr_t)addr) &
(((uint64_t)UCONST64(0xFFFFffffFFFFffff)) >> (32-LogMinObjAlignmentInBytes))) == 0;
}
// Check for disjoint base compressed oops.
static bool narrow_oop_base_disjoint() {
return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base);
}
// Check for real heapbased compressed oops.
// We must subtract the base as the bits overlap.
// If we negate above function, we also get unscaled and zerobased.
static bool narrow_oop_base_overlaps() {
return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base);
}
static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
static int narrow_oop_shift() { return _narrow_oop._shift; }
static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
@@ -461,16 +484,6 @@ class Universe: AllStatic {
static uintptr_t verify_mark_bits() PRODUCT_RETURN0;
static uintptr_t verify_mark_mask() PRODUCT_RETURN0;

// Flushing and deoptimization
static void flush_dependents_on(instanceKlassHandle dependee);
static void flush_dependents_on(Handle call_site, Handle method_handle);
#ifdef HOTSWAP
// Flushing and deoptimization in case of evolution
static void flush_evol_dependents_on(instanceKlassHandle dependee);
#endif // HOTSWAP
// Support for fullspeed debugging
static void flush_dependents_on_method(methodHandle dependee);

// Compiler support
static int base_vtable_size() { return _base_vtable_size; }
};
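Editor's note: the disjoint-base mode introduced above relies on the base having no bits in common with any shifted narrow oop, so decode can use a bitwise OR instead of an ADD. A standalone sketch of the mask test and the resulting decode, assuming a 3-bit shift (8-byte object alignment):

#include <cassert>
#include <cstdint>

static const unsigned kShift = 3;  // LogMinObjAlignmentInBytes for 8-byte alignment

// A base is "disjoint" when none of its low 32+shift bits are set: every
// shifted 32-bit narrow oop then fits entirely below the base's bits,
// which is what the is_disjoint_heap_base_address() mask above checks.
static bool is_disjoint_base(uint64_t base) {
  return (base & (~uint64_t(0) >> (32 - kShift))) == 0;
}

static uint64_t decode_disjoint(uint32_t narrow_oop, uint64_t base) {
  // With disjoint bits, add degenerates to OR: oop = (cOop << 3) | base.
  return base | (uint64_t(narrow_oop) << kShift);
}

int main() {
  const uint64_t thirty_two_gb = uint64_t(1) << 35;
  assert(is_disjoint_base(2 * thirty_two_gb));   // 64GB: 32G-aligned, disjoint
  assert(!is_disjoint_base(thirty_two_gb + 8));  // not 32G-aligned
  // OR and ADD agree for a disjoint base, which enables the optimization.
  uint32_t coop = 0xDEADBEEF;
  uint64_t base = 2 * thirty_two_gb;
  assert(decode_disjoint(coop, base) == base + (uint64_t(coop) << kShift));
  return 0;
}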
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,17 +100,21 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
vtable_length = Universe::base_vtable_size();
}

if (super == NULL && !Universe::is_bootstrapping() &&
vtable_length != Universe::base_vtable_size()) {
// Someone is attempting to redefine java.lang.Object incorrectly. The
// only way this should happen is from
// SystemDictionary::resolve_from_stream(), which will detect this later
// and throw a security exception. So don't assert here to let
// the exception occur.
vtable_length = Universe::base_vtable_size();
if (super == NULL && vtable_length != Universe::base_vtable_size()) {
if (Universe::is_bootstrapping()) {
// Someone is attempting to override java.lang.Object incorrectly on the
// bootclasspath. The JVM cannot recover from this error including throwing
// an exception
vm_exit_during_initialization("Incompatible definition of java.lang.Object");
} else {
// Someone is attempting to redefine java.lang.Object incorrectly. The
// only way this should happen is from
// SystemDictionary::resolve_from_stream(), which will detect this later
// and throw a security exception. So don't assert here to let
// the exception occur.
vtable_length = Universe::base_vtable_size();
}
}
assert(super != NULL || vtable_length == Universe::base_vtable_size(),
"bad vtable size for class Object");
assert(vtable_length % vtableEntry::size() == 0, "bad vtable length");
assert(vtable_length >= Universe::base_vtable_size(), "vtable too small");
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -1727,7 +1728,7 @@ void BreakpointInfo::set(Method* method) {
// Deoptimize all dependents on this method
HandleMark hm(thread);
methodHandle mh(thread, method);
Universe::flush_dependents_on_method(mh);
CodeCache::flush_dependents_on_method(mh);
}
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -433,6 +433,13 @@ public:
// NullCheck oop_reg
//
inline static bool gen_narrow_oop_implicit_null_checks() {
// Advise the matcher to perform null checks on the narrow oop side.
// Implicit checks are not possible on the uncompressed oop side anyway
// (at least not for read accesses).
// Performs significantly better (especially on Power 6).
if (!os::zero_page_read_protected()) {
return true;
}
return Universe::narrow_oop_use_implicit_null_checks() &&
(narrow_oop_use_complex_address() ||
Universe::narrow_oop_base() != NULL);
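Editor's note: implicit null checks work because decoding a null narrow oop yields the encoding base itself, which sits in a read-protected page; the faulting load is then rewritten into the exception path. A standalone sketch of just the address arithmetic behind the trick (the actual page protection and signal handling are left out; constants are illustrative):

#include <cassert>
#include <cstdint>

// Decode as HotSpot would: oop = base + (narrowOop << shift).
static uint64_t decode(uint32_t narrow_oop, uint64_t base, unsigned shift) {
  return base + (uint64_t(narrow_oop) << shift);
}

int main() {
  const uint64_t base  = uint64_t(1) << 35;  // assumed heap-based encoding base
  const unsigned shift = 3;
  const uint64_t page  = 4096;

  // A null narrow oop decodes to the base itself. If the page at 'base' is
  // read-protected, a compiled load through this "pointer" traps, so no
  // explicit null test has to be emitted on the hot path.
  assert(decode(0, base, shift) == base);

  // The heap itself starts one page above base, so the smallest narrow oop
  // of a real object is page >> shift; those decode past the guard page.
  assert(decode(page >> shift, base, shift) == base + page);
  return 0;
}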
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3766,7 +3766,7 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) {
// All dependencies have been recorded from startup or this is a second or
// subsequent use of RedefineClasses
if (JvmtiExport::all_dependencies_are_recorded()) {
Universe::flush_evol_dependents_on(k_h);
CodeCache::flush_evol_dependents_on(k_h);
} else {
CodeCache::mark_all_nmethods_for_deoptimization();
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
@@ -1245,7 +1246,7 @@ JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject
{
// Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site, target);
CodeCache::flush_dependents_on(call_site, target);
java_lang_invoke_CallSite::set_target(call_site(), target());
}
}
@@ -1257,7 +1258,7 @@ JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobjec
{
// Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site, target);
CodeCache::flush_dependents_on(call_site, target);
java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
}
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -176,11 +176,11 @@ WB_END

WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL);
ReservedHeapSpace rhs(100 * granularity, granularity, false);
VirtualSpace vs;
vs.initialize(rhs, 50 * granularity);

//Check if constraints are complied
// Check if constraints are complied
if (!( UseCompressedOops && rhs.base() != NULL &&
Universe::narrow_oop_base() != NULL &&
Universe::narrow_oop_use_implicit_null_checks() )) {
@@ -203,7 +203,7 @@ WB_END
static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
size_t magnitude, size_t iterations) {
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL);
ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false);
VirtualSpace vs;
if (!vs.initialize(rhs, 0)) {
tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1522,15 +1522,6 @@ void Arguments::set_use_compressed_oops() {
FLAG_SET_ERGO(bool, UseCompressedOops, true);
}
#endif
#ifdef _WIN64
if (UseLargePages && UseCompressedOops) {
// Cannot allocate guard pages for implicit checks in indexed addressing
// mode, when large pages are specified on windows.
// This flag could be switched ON if narrow oop base address is set to 0,
// see code in Universe::initialize_heap().
Universe::set_narrow_oop_use_implicit_null_checks(false);
}
#endif //  _WIN64
} else {
if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
warning("Max heap size too large for Compressed Oops");
@@ -2416,6 +2407,7 @@ bool Arguments::check_vm_args_consistency() {
#ifdef COMPILER1
status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
#endif
status = status && verify_min_value(HeapSearchSteps, 1, "HeapSearchSteps");

if (PrintNMTStatistics) {
#if INCLUDE_NMT
@@ -4102,6 +4094,10 @@ void Arguments::PropertyList_add(SystemProperty** plist, const char* k, char* v)
PropertyList_add(plist, new_p);
}

void Arguments::PropertyList_add(SystemProperty *element) {
PropertyList_add(&_system_properties, element);
}

// This add maintains unique property key in the list.
void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v, jboolean append) {
if (plist == NULL)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -572,6 +572,7 @@ class Arguments : AllStatic {
static void init_version_specific_system_properties();

// Property List manipulation
static void PropertyList_add(SystemProperty *element);
static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) {
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -531,6 +531,11 @@ class CommandLineFlags {
product_pd(uintx, HeapBaseMinAddress, \
"OS specific low limit for heap base address") \
\
product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
"Heap allocation steps through preferred address regions to find" \
" where it can allocate the heap. Number of steps to take per " \
"region.") \
\
diagnostic(bool, PrintCompressedOopsMode, false, \
"Print compressed oops base address and encoding mode") \
\
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,21 +43,19 @@ ReservedSpace::ReservedSpace(size_t size) {
// Don't force the alignment to be large page aligned,
// since that will waste memory.
size_t alignment = os::vm_allocation_granularity();
initialize(size, alignment, large_pages, NULL, 0, false);
initialize(size, alignment, large_pages, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
char* requested_address,
const size_t noaccess_prefix) {
initialize(size+noaccess_prefix, alignment, large, requested_address,
noaccess_prefix, false);
char* requested_address) {
initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
bool executable) {
initialize(size, alignment, large, NULL, 0, executable);
initialize(size, alignment, large, NULL, executable);
}

// Helper method.
@@ -91,7 +89,6 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address,

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
char* requested_address,
const size_t noaccess_prefix,
bool executable) {
const size_t granularity = os::vm_allocation_granularity();
assert((size & (granularity - 1)) == 0,
@@ -103,10 +100,6 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,

alignment = MAX2(alignment, (size_t)os::vm_page_size());

// Assert that if noaccess_prefix is used, it is the same as alignment.
assert(noaccess_prefix == 0 ||
noaccess_prefix == alignment, "noaccess prefix wrong");

_base = NULL;
_size = 0;
_special = false;
@@ -122,11 +115,6 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
bool special = large && !os::can_commit_large_page_memory();
char* base = NULL;

if (requested_address != 0) {
requested_address -= noaccess_prefix; // adjust requested address
assert(requested_address != NULL, "huge noaccess prefix?");
}

if (special) {

base = os::reserve_memory_special(size, alignment, requested_address, executable);
@@ -176,7 +164,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
if (base == NULL) return;

// Check alignment constraints
if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
if ((((size_t)base) & (alignment - 1)) != 0) {
// Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed");
// Make sure that size is aligned
@@ -197,16 +185,6 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
_base = base;
_size = size;
_alignment = alignment;
_noaccess_prefix = noaccess_prefix;

// Assert that if noaccess_prefix is used, it is the same as alignment.
assert(noaccess_prefix == 0 ||
noaccess_prefix == _alignment, "noaccess prefix wrong");

assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
}


@@ -276,54 +254,336 @@ void ReservedSpace::release() {
_base = NULL;
_size = 0;
_noaccess_prefix = 0;
_alignment = 0;
_special = false;
_executable = false;
}
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
(Universe::narrow_oop_base() != NULL) &&
Universe::narrow_oop_use_implicit_null_checks()),
"noaccess_prefix should be used only with non zero based compressed oops");
static size_t noaccess_prefix_size(size_t alignment) {
return lcm(os::vm_page_size(), alignment);
}

// If there is no noaccess prefix, return.
if (_noaccess_prefix == 0) return;
void ReservedHeapSpace::establish_noaccess_prefix() {
assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
_noaccess_prefix = noaccess_prefix_size(_alignment);

assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
"must be at least page size big");

// Protect memory at the base of the allocated region.
// If special, the page was committed (only matters on windows)
if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
_special)) {
fatal("cannot protect protection page");
}
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
if (base() && base() + _size > (char *)OopEncodingHeapMax) {
if (true
WIN64_ONLY(&& !UseLargePages)
AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
// Protect memory at the base of the allocated region.
// If special, the page was committed (only matters on windows)
if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
fatal("cannot protect protection page");
}
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Protected page at the reserved heap base: "
PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
}
assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
} else {
Universe::set_narrow_oop_use_implicit_null_checks(false);
}
}

_base += _noaccess_prefix;
_size -= _noaccess_prefix;
assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
"must be exactly of required size and alignment");
assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
bool large, char* requested_address) :
ReservedSpace(size, alignment, large,
requested_address,
(UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
Universe::narrow_oop_use_implicit_null_checks()) ?
lcm(os::vm_page_size(), alignment) : 0) {
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
size_t alignment,
bool large,
char* requested_address) {
if (_base != NULL) {
// We tried before, but we didn't like the address delivered.
release();
}

// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
bool special = large && !os::can_commit_large_page_memory();
char* base = NULL;

if (PrintCompressedOopsMode && Verbose) {
tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
requested_address, (address)size);
}

if (special) {
base = os::reserve_memory_special(size, alignment, requested_address, false);

if (base != NULL) {
// Check alignment constraints.
assert((uintptr_t) base % alignment == 0,
err_msg("Large pages returned a non-aligned address, base: "
PTR_FORMAT " alignment: " PTR_FORMAT,
base, (void*)(uintptr_t)alignment));
_special = true;
}
}

if (base == NULL) {
// Failed; try to reserve regular memory below
if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Reserve regular memory without large pages.");
}
}

// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.

// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid over mapping something
// important.  If available space is not detected, return NULL.

if (requested_address != 0) {
base = os::attempt_reserve_memory_at(size, requested_address);
} else {
base = os::reserve_memory(size, NULL, alignment);
}
}
if (base == NULL) { return; }

// Done
_base = base;
_size = size;
_alignment = alignment;

// Check alignment constraints
if ((((size_t)base) & (alignment - 1)) != 0) {
// Base not aligned, retry.
release();
}
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
char *lowest_start,
size_t attach_point_alignment,
char *aligned_heap_base_min_address,
char *upper_bound,
size_t size,
size_t alignment,
bool large) {
const size_t attach_range = highest_start - lowest_start;
// Cap num_attempts at possible number.
// At least one is possible even for 0 sized attach range.
const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

const size_t stepsize = (attach_range == 0) ? // Only one try.
(size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

// Try attach points from top to bottom.
char* attach_point = highest_start;
while (attach_point >= lowest_start &&
attach_point <= highest_start && // Avoid wrap around.
((_base == NULL) ||
(_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
try_reserve_heap(size, alignment, large, attach_point);
attach_point -= stepsize;
}
}
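Editor's note: try_reserve_range() above probes at most HeapSearchSteps attach points from the top of the candidate range downward. A standalone sketch of that stepping computation, with a stub standing in for the real os:: reservation calls and purely illustrative addresses:

#include <cstdint>
#include <cstdio>

// Stub standing in for try_reserve_heap(): pretend low-enough addresses work.
static bool try_reserve(uint64_t addr) { return addr <= 0x700000000ULL; }

// Probe at most max_steps attach points from highest_start down to
// lowest_start, stepping by an attach-point-aligned stride, the way
// try_reserve_range() does.
static uint64_t probe_range(uint64_t highest_start, uint64_t lowest_start,
                            uint64_t attach_point_alignment, uint64_t max_steps) {
  const uint64_t attach_range = highest_start - lowest_start;
  // At least one attempt is possible even for a zero-sized range.
  const uint64_t possible = attach_range / attach_point_alignment + 1;
  const uint64_t attempts = possible < max_steps ? possible : max_steps;
  // Round the stride up so 'attempts' steps still cover the whole range.
  const uint64_t stepsize = attach_range == 0 ? highest_start
      : (attach_range / attempts + attach_point_alignment - 1) &
        ~(attach_point_alignment - 1);

  for (uint64_t p = highest_start; p >= lowest_start && p <= highest_start;
       p -= stepsize) {  // p <= highest_start guards against wrap-around
    if (try_reserve(p)) return p;
  }
  return 0;
}

int main() {
  // A HeapSearchSteps-like cap of 3 attempts across [16GB, 32GB).
  uint64_t hit = probe_range(0x800000000ULL, 0x400000000ULL, 0x10000, 3);
  printf("reserved at %llx\n", (unsigned long long)hit);
}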
|
||||
#define SIZE_64K ((uint64_t) UCONST64( 0x10000))
|
||||
#define SIZE_256M ((uint64_t) UCONST64( 0x10000000))
|
||||
#define SIZE_32G ((uint64_t) UCONST64( 0x800000000))
|
||||
|
||||
// Helper for heap allocation. Returns an array with addresses
|
||||
// (OS-specific) which are suited for disjoint base mode. Array is
|
||||
// NULL terminated.
|
||||
static char** get_attach_addresses_for_disjoint_mode() {
|
||||
static uint64_t addresses[] = {
|
||||
2 * SIZE_32G,
|
||||
3 * SIZE_32G,
|
||||
4 * SIZE_32G,
|
||||
8 * SIZE_32G,
|
||||
10 * SIZE_32G,
|
||||
1 * SIZE_64K * SIZE_32G,
|
||||
2 * SIZE_64K * SIZE_32G,
|
||||
3 * SIZE_64K * SIZE_32G,
|
||||
4 * SIZE_64K * SIZE_32G,
|
||||
16 * SIZE_64K * SIZE_32G,
|
||||
32 * SIZE_64K * SIZE_32G,
|
||||
34 * SIZE_64K * SIZE_32G,
|
||||
0
|
||||
};
|
||||
|
||||
// Sort out addresses smaller than HeapBaseMinAddress. This assumes
|
||||
// the array is sorted.
|
||||
uint i = 0;
|
||||
while (addresses[i] != 0 &&
|
||||
(addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
|
||||
i++;
|
||||
}
|
||||
uint start = i;
|
||||
|
||||
// Avoid more steps than requested.
|
||||
i = 0;
|
||||
while (addresses[start+i] != 0) {
|
||||
if (i == HeapSearchSteps) {
|
||||
addresses[start+i] = 0;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
return (char**) &addresses[start];
|
||||
}
|
||||
|
||||
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
  assert(HeapBaseMinAddress > 0, "sanity");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to allocate at the user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = (char *)align_ptr_up  (      aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointers, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
      const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&  // Zerobased theoretically possible.
        ((_base == NULL) ||                                       // No previous try succeeded.
         (_base + size > zerobased_max))) {                       // Unscaled delivered an arbitrary address.

      // Calc the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
      // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
      // "Cannot use int to initialize char*." Introduce aux variable.
      char *unscaled_end = (char *)UnscaledOopHeapMax;
      unscaled_end -= size;
      char *lowest_start = (size < UnscaledOopHeapMax) ?
        MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
      lowest_start = (char *)align_ptr_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&  // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      if (PrintCompressedOopsMode && Verbose) {
        tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
      }
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

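Taken together, initialize_compressed_heap() prefers unscaled oops, then zero based, then disjoint base, and finally an arbitrary heap-based placement behind a noaccess prefix. A simplified standalone classification of where a delivered base would land, assuming the usual 4 GB and 32 GB limits for 8-byte alignment; this sketches the preference order only and is not the VM's actual decision code:

// Sketch of the placement preference order, with assumed limits.
#include <cstdint>

enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, DisjointBaseNarrowOop, HeapBasedNarrowOop };

NarrowOopMode classify_heap_base(uint64_t base, uint64_t size,
                                 uint64_t unscaled_max,    // assumed 4 GB
                                 uint64_t encoding_max) {  // assumed 32 GB
  if (base + size <= unscaled_max) return UnscaledNarrowOop;          // oop == address
  if (base + size <= encoding_max) return ZeroBasedNarrowOop;         // address == oop << shift
  if ((base & (encoding_max - 1)) == 0) return DisjointBaseNarrowOop; // base can be OR'ed in
  return HeapBasedNarrowOop;                                          // full add, noaccess prefix
}
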
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_size_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at an arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for code segment. Same as Java heap only we mark this as

@ -791,8 +1051,7 @@ class TestReservedSpace : AllStatic {
  ReservedSpace rs(size,          // size
                   alignment,     // alignment
                   UseLargePages, // large
                   NULL,          // requested_address
                   0);            // noacces_prefix
                   (char *)NULL); // requested_address

  test_log(" rs.special() == %d", rs.special());

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,33 +31,29 @@

class ReservedSpace VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
 protected:
  char*  _base;
  size_t _size;
  size_t _noaccess_prefix;
  size_t _alignment;
  bool   _special;
 private:
  bool   _executable;

  // ReservedSpace
  ReservedSpace(char* base, size_t size, size_t alignment, bool special,
                bool executable);
 protected:
  void initialize(size_t size, size_t alignment, bool large,
                  char* requested_address,
                  const size_t noaccess_prefix,
                  bool executable);

 protected:
  // Create protection page at the beginning of the space.
  void protect_noaccess_prefix(const size_t size);

 public:
  // Constructor
  ReservedSpace();
  ReservedSpace(size_t size);
  ReservedSpace(size_t size, size_t alignment, bool large,
                char* requested_address = NULL,
                const size_t noaccess_prefix = 0);
                char* requested_address = NULL);
  ReservedSpace(size_t size, size_t alignment, bool large, bool executable);

  // Accessors
@ -98,12 +94,23 @@ ReservedSpace ReservedSpace::last_part(size_t partition_size)
  return last_part(partition_size, alignment());
}

// Class encapsulating behavior specific of memory space reserved for Java heap
// Class encapsulating behavior specific to memory space reserved for the Java heap.
class ReservedHeapSpace : public ReservedSpace {
 public:
  // Constructor
  ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                    bool large, char* requested_address);
 private:
  void try_reserve_heap(size_t size, size_t alignment, bool large,
                        char *requested_address);
  void try_reserve_range(char *highest_start, char *lowest_start,
                         size_t attach_point_alignment, char *aligned_HBMA,
                         char *upper_bound, size_t size, size_t alignment, bool large);
  void initialize_compressed_heap(const size_t size, size_t alignment, bool large);
  // Create protection page at the beginning of the space.
  void establish_noaccess_prefix();
 public:
  // Constructor. Tries to find a heap that is good for compressed oops.
  ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large);
  // Returns the base to be used for compression, i.e. so that null can be
  // encoded safely and implicit null checks can work.
  char *compressed_oop_base() { return _base - _noaccess_prefix; }
};

// Class encapsulating behavior specific to memory space for Code

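compressed_oop_base() above returns _base minus the noaccess prefix: narrow oop null then decodes to an address inside the protected prefix, so a load through it traps exactly like an ordinary null check. A worked sketch of that arithmetic, where the prefix size, base address, and shift are all assumed for illustration:

// Worked sketch of the noaccess-prefix arithmetic; all numbers assumed.
#include <cstdint>
#include <cassert>

int main() {
  const unsigned shift         = 3;             // 8-byte object alignment.
  const uint64_t prefix        = 0x200000;      // Assumed noaccess prefix size.
  const uint64_t base          = 0x800000000;   // Assumed start of the reservation.
  const uint64_t heap_start    = base + prefix;           // First usable heap byte.
  const uint64_t encoding_base = heap_start - prefix;     // What compressed_oop_base() returns.

  // Decoding narrow oop 0 yields the encoding base itself, which lies in the
  // protected prefix below the heap, so dereferencing it faults.
  const uint64_t decoded_null = encoding_base + (UINT64_C(0) << shift);
  assert(decoded_null < heap_start);
  return 0;
}
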
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -124,9 +124,6 @@ extern int LogBitsPerHeapOop;
extern int BytesPerHeapOop;
extern int BitsPerHeapOop;

// Oop encoding heap max
extern uint64_t OopEncodingHeapMax;

const int BitsPerJavaInteger = 32;
const int BitsPerJavaLong    = 64;
const int BitsPerSize_t      = size_tSize * BitsPerByte;
@ -195,7 +192,6 @@ inline size_t heap_word_size(size_t byte_size) {
  return (byte_size + (HeapWordSize-1)) >> LogHeapWordSize;
}


const size_t K = 1024;
const size_t M = K*K;
const size_t G = M*K;
@ -397,8 +393,17 @@ const int LogKlassAlignment = LogKlassAlignmentInBytes - LogHeapWordSize;
const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes;
const int KlassAlignment        = KlassAlignmentInBytes / HeapWordSize;

// Klass encoding metaspace max size
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
// Maximal size of heap where unscaled compression can be used. Also upper bound
// for heap placement: 4GB.
const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// Maximal size of heap where compressed oops can be used. Also upper bound for heap
// placement for zero based compression algorithm: UnscaledOopHeapMax << LogMinObjAlignmentInBytes.
extern uint64_t OopEncodingHeapMax;

// Maximal size of compressed class space. Above this limit compression is not possible.
// Also upper bound for placement of zero based class space. (Class space is further limited
// to be < 3G, see arguments.cpp.)
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
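With the default 8-byte object alignment (a shift of 3), the two heap bounds above come out to 4 GB and 32 GB. A compile-time check of that arithmetic; the shift value is an assumption of this sketch, not something fixed by the header:

// Compile-time check of the 4 GB / 32 GB limits, assuming shift 3.
#include <cstdint>

static const uint64_t unscaled_oop_heap_max = UINT64_C(0xffffffff) + 1;   // max_juint + 1
static const uint64_t oop_encoding_heap_max = unscaled_oop_heap_max << 3; // assumed alignment shift

static_assert(unscaled_oop_heap_max == UINT64_C(0x100000000), "4 GB");
static_assert(oop_encoding_heap_max == UINT64_C(0x800000000), "32 GB");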

// Machine dependent stuff

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -172,21 +172,21 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)

// Some constant sizes used throughout the AIX port
#define SIZE_1K   ((uint64_t)          0x400ULL)
#define SIZE_4K   ((uint64_t)         0x1000ULL)
#define SIZE_64K  ((uint64_t)        0x10000ULL)
#define SIZE_1M   ((uint64_t)       0x100000ULL)
#define SIZE_4M   ((uint64_t)       0x400000ULL)
#define SIZE_8M   ((uint64_t)       0x800000ULL)
#define SIZE_16M  ((uint64_t)      0x1000000ULL)
#define SIZE_256M ((uint64_t)     0x10000000ULL)
#define SIZE_1G   ((uint64_t)     0x40000000ULL)
#define SIZE_2G   ((uint64_t)     0x80000000ULL)
#define SIZE_4G   ((uint64_t)    0x100000000ULL)
#define SIZE_16G  ((uint64_t)    0x400000000ULL)
#define SIZE_32G  ((uint64_t)    0x800000000ULL)
#define SIZE_64G  ((uint64_t)   0x1000000000ULL)
#define SIZE_1T   ((uint64_t)  0x10000000000ULL)
#define SIZE_1K   ((uint64_t) UCONST64(          0x400))
#define SIZE_4K   ((uint64_t) UCONST64(         0x1000))
#define SIZE_64K  ((uint64_t) UCONST64(        0x10000))
#define SIZE_1M   ((uint64_t) UCONST64(       0x100000))
#define SIZE_4M   ((uint64_t) UCONST64(       0x400000))
#define SIZE_8M   ((uint64_t) UCONST64(       0x800000))
#define SIZE_16M  ((uint64_t) UCONST64(      0x1000000))
#define SIZE_256M ((uint64_t) UCONST64(     0x10000000))
#define SIZE_1G   ((uint64_t) UCONST64(     0x40000000))
#define SIZE_2G   ((uint64_t) UCONST64(     0x80000000))
#define SIZE_4G   ((uint64_t) UCONST64(    0x100000000))
#define SIZE_16G  ((uint64_t) UCONST64(    0x400000000))
#define SIZE_32G  ((uint64_t) UCONST64(    0x800000000))
#define SIZE_64G  ((uint64_t) UCONST64(   0x1000000000))
#define SIZE_1T   ((uint64_t) UCONST64(0x10000000000))


#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
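The rewrite above trades bare ULL suffixes for the UCONST64 macro, so each toolchain can spell an unsigned 64-bit literal its own way. A plausible minimal definition of such a macro is sketched below; HotSpot's real definitions live in the per-compiler globalDefinitions headers, and the ui64 spelling for Visual C++ is an assumption of this sketch:

// Illustrative only: paste a per-toolchain 64-bit unsigned suffix onto the literal.
#include <cstdint>

#ifdef _MSC_VER
  #define UCONST64(x) (x ## ui64)
#else
  #define UCONST64(x) (x ## ULL)
#endif

#define SIZE_1T ((uint64_t) UCONST64(0x10000000000))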

hotspot/test/runtime/BadObjectClass/BootstrapRedefine.java (new file, 44 lines)
@ -0,0 +1,44 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 6583051
 * @summary Give error if java.lang.Object has been incompatibly overridden on the bootpath
 * @library /testlibrary
 * @compile Object.java
 * @run main BootstrapRedefine
 */

import com.oracle.java.testlibrary.*;

public class BootstrapRedefine {

    public static void main(String[] args) throws Exception {
        String testClasses = System.getProperty("test.classes", ".");
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xbootclasspath/p:" + testClasses, "-version");
        new OutputAnalyzer(pb.start())
            .shouldContain("Incompatible definition of java.lang.Object")
            .shouldHaveExitValue(1);
    }
}
hotspot/test/runtime/BadObjectClass/Object.java (new file, 37 lines)
@ -0,0 +1,37 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package java.lang;

/**
 * This is a fake java.lang.Object class.
 */
public class Object {

    // Add some methods
    void dummy1() { return; }
    void dummy2() { return; }
    void dummy3() { return; }
}
@ -35,20 +35,42 @@ import com.oracle.java.testlibrary.*;
public class UseCompressedOops {

    public static void main(String[] args) throws Exception {
        testCompressedOopsModesGCs();
        testCompressedOopsModesGCs("-XX:+UseLargePages");
    }

    public static void testCompressedOopsModesGCs(String... flags) throws Exception {
        ArrayList<String> args = new ArrayList<>();
        Collections.addAll(args, flags);

        // Test default.
        testCompressedOopsModes(args);
        // Test GCs.
        testCompressedOopsModes(args, "-XX:+UseG1GC");
        testCompressedOopsModes(args, "-XX:+UseConcMarkSweepGC");
        testCompressedOopsModes(args, "-XX:+UseSerialGC");
        testCompressedOopsModes(args, "-XX:+UseParallelGC");
        testCompressedOopsModes(args, "-XX:+UseParallelOldGC");
    }

    public static void testCompressedOopsModes(ArrayList<String> flags1, String... flags2) throws Exception {
        ArrayList<String> args = new ArrayList<>();
        args.addAll(flags1);
        Collections.addAll(args, flags2);

        if (Platform.is64bit()) {
            // Explicitly turn off compressed oops
            testCompressedOops("-XX:-UseCompressedOops", "-Xmx32m")
            testCompressedOops(args, "-XX:-UseCompressedOops", "-Xmx32m")
                .shouldNotContain("Compressed Oops")
                .shouldHaveExitValue(0);

            // Compressed oops should be on by default
            testCompressedOops("-Xmx32m")
            testCompressedOops(args, "-Xmx32m")
                .shouldContain("Compressed Oops mode")
                .shouldHaveExitValue(0);

            // Explicitly enabling compressed oops
            testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m")
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32m")
                .shouldContain("Compressed Oops mode")
                .shouldHaveExitValue(0);

@ -58,68 +80,89 @@ public class UseCompressedOops {
            // puts the heap way up, forcing different behaviour.
            if (!Platform.isOSX() && !Platform.isSolaris()) {
                // Larger than 4gb heap should result in zero based with shift 3
                testCompressedOops("-XX:+UseCompressedOops", "-Xmx5g")
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx5g")
                    .shouldContain("Zero based")
                    .shouldContain("Oop shift amount: 3")
                    .shouldHaveExitValue(0);

                // Larger than 3gb heap and HeapBaseMinAddress=1g should result in zero based with shift 3
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx3200m", "-XX:HeapBaseMinAddress=1g")
                    .shouldContain("Zero based")
                    .shouldContain("Oop shift amount: 3")
                    .shouldHaveExitValue(0);

                // Small heap above 4gb should result in zero based with shift 3
                testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=4g")
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=4g")
                    .shouldContain("Zero based")
                    .shouldContain("Oop shift amount: 3")
                    .shouldHaveExitValue(0);

                // Small heap above 32gb should result in non-zero based with shift 3
                testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=32g")
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=32g")
                    .shouldContain("Non-zero disjoint base")
                    .shouldContain("Oop shift amount: 3")
                    .shouldHaveExitValue(0);

                // Small heap above 32gb should result in non-zero based with shift 3
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32m", "-XX:HeapBaseMinAddress=72704m")
                    .shouldContain("Non-zero based")
                    .shouldContain("Oop shift amount: 3")
                    .shouldHaveExitValue(0);

                // 32gb heap with heap base above 64gb and object alignment set to 16 bytes should result
                // in non-zero based with shift 4
                testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16",
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16",
                        "-XX:HeapBaseMinAddress=64g")
                    .shouldContain("Non-zero based")
                    .shouldContain("Non-zero disjoint base")
                    .shouldContain("Oop shift amount: 4")
                    .shouldHaveExitValue(0);

                // 32gb heap with object alignment set to 16 bytes should result in zero based with shift 4
                testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16")
                testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=16")
                    .shouldContain("Zero based")
                    .shouldContain("Oop shift amount: 4")
                    .shouldHaveExitValue(0);
            }

            // This is a pathological case for the heap allocation algorithm. Regression test.
            // HeapBaseMinAddress must be 2g and should not be set on the command line.
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx2g")
                .shouldNotContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx29g", "-XX:CompressedClassSpaceSize=1g")
                .shouldNotContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);

            // Explicitly enabling compressed oops with 32gb heap should result in a warning
            testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g")
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32g")
                .shouldContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);

            // 32gb heap should not result in a warning
            testCompressedOops("-Xmx32g")
            testCompressedOops(args, "-Xmx32g")
                .shouldNotContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);

            // Explicitly enabling compressed oops with 32gb heap and object
            // alignment set to 8 bytes should result in a warning
            testCompressedOops("-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=8")
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32g", "-XX:ObjectAlignmentInBytes=8")
                .shouldContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);

            // 64gb heap and object alignment set to 16 bytes should result in a warning
            testCompressedOops("-XX:+UseCompressedOops", "-Xmx64g", "-XX:ObjectAlignmentInBytes=16")
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx64g", "-XX:ObjectAlignmentInBytes=16")
                .shouldContain("Max heap size too large for Compressed Oops")
                .shouldHaveExitValue(0);

        } else {
            // Compressed oops should only apply to 64-bit platforms
            testCompressedOops("-XX:+UseCompressedOops", "-Xmx32m")
            testCompressedOops(args, "-XX:+UseCompressedOops", "-Xmx32m")
                .shouldContain("Unrecognized VM option 'UseCompressedOops'")
                .shouldHaveExitValue(1);
        }
    }

    private static OutputAnalyzer testCompressedOops(String... flags) throws Exception {
    private static OutputAnalyzer testCompressedOops(ArrayList<String> flags1, String... flags2) throws Exception {
        ArrayList<String> args = new ArrayList<>();

        // Always run with these three:
@ -128,7 +171,8 @@ public class UseCompressedOops {
        args.add("-Xms32m");

        // Add the extra flags
        Collections.addAll(args, flags);
        args.addAll(flags1);
        Collections.addAll(args, flags2);

        args.add("-version");
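Two of the expectations above can be checked by hand. A small standalone calculation, assuming the default 8-byte object alignment (4 GB unscaled limit, 32 GB zero-based limit):

// Worked arithmetic behind two test expectations; limits are assumptions
// that hold for the default 8-byte object alignment (shift 3).
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MB = UINT64_C(1) << 20, GB = UINT64_C(1) << 30;
  const uint64_t unscaled_max  =  4 * GB;   // UnscaledOopHeapMax
  const uint64_t zerobased_max = 32 * GB;   // OopEncodingHeapMax

  // -Xmx3200m with -XX:HeapBaseMinAddress=1g: the heap end passes 4 GB, so
  // unscaled mode is out, but it still fits below 32 GB: "Zero based".
  uint64_t base = 1 * GB, size = 3200 * MB;
  printf("unscaled:  %d\n", base + size <= unscaled_max);   // 0
  printf("zerobased: %d\n", base + size <= zerobased_max);  // 1

  // -Xmx32m with -XX:HeapBaseMinAddress=32g: the heap end is beyond 32 GB,
  // and 32 GB is itself 32 GB aligned: "Non-zero disjoint base".
  base = 32 * GB; size = 32 * MB;
  printf("zerobased: %d\n", base + size <= zerobased_max);  // 0
  printf("disjoint:  %d\n", (base & (zerobased_max - 1)) == 0);  // 1
  return 0;
}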